diff --git a/.github/.probe/writer.txt b/.github/.probe/writer.txt new file mode 100644 index 000000000..31a0b6ebc --- /dev/null +++ b/.github/.probe/writer.txt @@ -0,0 +1,16 @@ +You are helpful senior technical writer. +Your role is to automatically assist with GitHub issues. + +Before jumping on the task or replying, you first should analyze the user request or issue details thoroughly. + +When responding: +1. Be concise but thorough in your responses. +2. If the issue description is unclear, ask clarifying questions. +3. Request any additional information you might need to better assist +4. Provide helpful information related to the query +5. Try to provide an elegant and concise solution. +6. If there are multiple different solutions or next steps, convey it in the response +7. If solution is clear, you can jump to implementation right away, if not, you can ask user a clarification question, by calling attempt_completion tool, with required details. + +When writing content: +Don’t use title case. Following sentence case conventions, like in Google and GitHub style guides. diff --git a/.github/workflows/autoupdate.yaml b/.github/workflows/autoupdate.yaml new file mode 100644 index 000000000..640230ba5 --- /dev/null +++ b/.github/workflows/autoupdate.yaml @@ -0,0 +1,17 @@ +# autoupdate is a GitHub Action that auto-updates pull requests branches whenever changes land on their destination branch. +name: autoupdate +on: + push: + branches: + - main +jobs: + autoupdate: + name: autoupdate + runs-on: ubuntu-22.04 + steps: + - uses: docker://chinthakagodawita/autoupdate-action:v1 + env: + GITHUB_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + # Only monitor PRs that are not currently in the draft state. 
+ PR_READY_STATE: "ready_for_review" + MERGE_CONFLICT_ACTION: "ignore" # Possible option to prevent retrying failed merges diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index a3988f5b2..afed13134 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -1,19 +1,67 @@ name: Deploy Documentation on: - workflow_dispatch: # Manual trigger only + workflow_dispatch: inputs: subfolder: - description: 'Subfolder for documentation (e.g., docs3, docsv3, docs)' + description: 'Subfolder for documentation (leave empty for root deployment)' required: false - default: 'docs3' + default: '' type: string + triggering_pr_number: + description: 'PR number that triggered this deployment' + required: false + default: '' + type: string + triggering_pr_title: + description: 'PR title that triggered this deployment' + required: false + default: '' + type: string + triggering_commit_sha: + description: 'Commit SHA that triggered this deployment' + required: false + default: '' + type: string + triggering_branch: + description: 'Branch that triggered this deployment' + required: false + default: '' + type: string + original_pr_number: + description: 'Original PR number (if different from triggering PR)' + required: false + default: '' + type: string + +# Ensure only latest deployment runs +concurrency: + group: docs-deployment + cancel-in-progress: true jobs: merge-docs: runs-on: ubuntu-latest steps: + - name: Log deployment trigger information + run: | + echo "πŸš€ Starting documentation deployment" + echo "πŸ“‹ Trigger Information:" + echo " - Branch: ${{ inputs.triggering_branch || 'N/A' }}" + echo " - Commit: ${{ inputs.triggering_commit_sha || 'N/A' }}" + if [ -n "${{ inputs.triggering_pr_number }}" ]; then + echo " - PR: #${{ inputs.triggering_pr_number }} - ${{ inputs.triggering_pr_title }}" + if [ -n "${{ inputs.original_pr_number }}" ] && [ "${{ inputs.original_pr_number }}" != "${{ inputs.triggering_pr_number 
}}" ]; then + echo " - Original PR: #${{ inputs.original_pr_number }} (cherry-picked)" + fi + else + echo " - PR: N/A (direct push)" + fi + echo " - Timestamp: $(date)" + echo " - Workflow Run: ${{ github.run_id }}" + echo " - Subfolder: ${{ inputs.subfolder || '(root deployment)' }}" + - name: Checkout production branch uses: actions/checkout@v4 with: @@ -43,77 +91,69 @@ jobs: - name: Cleanup old version folders and assets run: | - echo "🧹 Cleaning up old version folders and assets..." - - # Get current version folders from config - current_folders=$(python3 -c "import json; config=json.load(open('branches-config.json')); folders=[v.get('folder','') for v in config.get('versions',[]) if v.get('folder','')]; print(' '.join(folders))") - - echo "πŸ“‹ Current version folders: $current_folders" - - # Define asset types that will be regenerated - asset_types=( - "style.css" - "images" - "img" - "logo" - "favicon.ico" - "favicon.png" - "snippets" + echo "🧹 Cleaning up all content except essential files..." + + # Define folders to keep (whitelist) + keep_folders=( + ".github" + ".git" + ".devcontainer" + "scripts" + ".vale" + ".probe" ) - # Remove old assets (they'll be regenerated from current versions) - echo "🎨 Cleaning up old assets..." 
- for asset in "${asset_types[@]}"; do - if [ -e "$asset" ]; then - echo "πŸ—‘οΈ Removing old asset: $asset" - rm -rf "$asset" - fi - done - - # Remove old docs.json (will be regenerated) - if [ -f "docs.json" ]; then - echo "πŸ—‘οΈ Removing old docs.json" - rm -f "docs.json" - fi + # Define files to keep (whitelist) + keep_files=( + "branches-config.json" + ".gitignore" + "README.md" + ".vale.ini" + ) - # Also clean up the subfolder if it exists (using input parameter) - SUBFOLDER="${{ inputs.subfolder || 'docs3' }}" - if [ -n "$SUBFOLDER" ] && [ -d "$SUBFOLDER" ]; then - echo "πŸ—‘οΈ Removing old $SUBFOLDER subfolder" - rm -rf "$SUBFOLDER" - fi + echo "πŸ“‹ Folders to keep: ${keep_folders[*]}" + echo "πŸ“‹ Files to keep: ${keep_files[*]}" - # Remove old version folders that aren't in the current config - # Look for directories that look like version folders (numeric or version-like names) + # Remove all directories except the ones we want to keep + echo "πŸ—‘οΈ Removing old directories..." 
for dir in */; do if [ -d "$dir" ]; then dir_name=${dir%/} # Remove trailing slash - - # Skip non-version directories - if [[ "$dir_name" =~ ^\..*$ ]] || [ "$dir_name" = ".github" ]; then - continue - fi - - # Check if this folder is in current config - is_current=false - for current in $current_folders; do - if [ "$dir_name" = "$current" ]; then - is_current=true + + should_keep=false + for keep_folder in "${keep_folders[@]}"; do + if [ "$dir_name" = "$keep_folder" ]; then + should_keep=true break fi done + + if [ "$should_keep" = false ]; then + echo "πŸ—‘οΈ Removing directory: $dir_name/" + rm -rf "$dir" + else + echo "πŸ“ Keeping essential directory: $dir_name/" + fi + fi + done - # If it's not in current config and looks like a version folder, remove it - if [ "$is_current" = false ]; then - # Only remove if it looks like a version folder (contains numbers/dots or common version patterns) - if [[ "$dir_name" =~ ^[0-9]+\.[0-9]+$ ]] || [[ "$dir_name" =~ ^v[0-9] ]] || [[ "$dir_name" =~ ^[0-9] ]]; then - echo "πŸ—‘οΈ Removing old version folder: $dir_name" - rm -rf "$dir" - else - echo "πŸ“ Keeping non-version directory: $dir_name" + # Remove all files except the ones we want to keep + echo "πŸ—‘οΈ Removing old files..." + for file in *; do + if [ -f "$file" ]; then + should_keep=false + for keep_file in "${keep_files[@]}"; do + if [ "$file" = "$keep_file" ]; then + should_keep=true + break fi + done + + if [ "$should_keep" = false ]; then + echo "πŸ—‘οΈ Removing file: $file" + rm -f "$file" else - echo "πŸ“ Keeping current version folder: $dir_name (will be refreshed)" + echo "πŸ“„ Keeping essential file: $file" fi fi done @@ -129,7 +169,7 @@ jobs: echo "πŸ”„ Starting branch cloning and organization..." 
# Read the branches config and extract branch information - branches=$(python3 -c "import json; config=json.load(open('branches-config.json')); [print(f\"{v.get('folder','')}:{v.get('branch','main')}\") for v in config.get('versions',[]) if v.get('folder','')]") + branches=$(python3 -c "import json; config=json.load(open('branches-config.json')); [print(f\"{v.get('sourceFolder','')}:{v.get('branch','main')}\") for v in config.get('versions',[]) if v.get('sourceFolder','')]") echo "πŸ“‹ Branches to process:" echo "$branches" @@ -160,8 +200,18 @@ jobs: mkdir -p "$folder" echo "πŸ“ Moving contents from $temp_dir to $folder..." - # Copy all files except .git directory - find "$temp_dir" -mindepth 1 -maxdepth 1 ! -name '.git' -exec cp -r {} "$folder/" \; + # Copy documentation content only, excluding development/build files + # Excluded: .git, scripts/, branches-config.json, .github/, README.md, .gitignore, .devcontainer/, .probe + find "$temp_dir" -mindepth 1 -maxdepth 1 \ + ! -name '.git' \ + ! -name 'scripts' \ + ! -name 'branches-config.json' \ + ! -name '.github' \ + ! -name 'README.md' \ + ! -name '.gitignore' \ + ! -name '.devcontainer' \ + ! -name '.probe' \ + -exec cp -r {} "$folder/" \; # Clean up temp directory rm -rf "$temp_dir" @@ -180,19 +230,21 @@ jobs: run: | echo "πŸ”„ Running documentation merger..." - # Get subfolder from input (with fallback) - SUBFOLDER="${{ inputs.subfolder || 'docs3' }}" - echo "πŸ“ Using subfolder: $SUBFOLDER" + # Get subfolder from input (no fallback - empty means root deployment) + SUBFOLDER="${{ inputs.subfolder }}" + echo "πŸ“ Using subfolder: '$SUBFOLDER'" # Run the merge script with branches config if [ -n "$SUBFOLDER" ]; then - python3 merge_docs_configs.py \ + echo "πŸ“ Deploying to subfolder: $SUBFOLDER" + python3 scripts/merge_docs_configs.py \ --branches-config branches-config.json \ --base-dir . 
\ --subfolder "$SUBFOLDER" \ --output docs.json else - python3 merge_docs_configs.py \ + echo "πŸ“ Deploying to root (no subfolder)" + python3 scripts/merge_docs_configs.py \ --branches-config branches-config.json \ --base-dir . \ --output docs.json @@ -205,7 +257,7 @@ jobs: echo "🧹 Cleaning up temporary cloned version folders..." # Get current version folders from config - current_folders=$(python3 -c "import json; config=json.load(open('branches-config.json')); folders=[v.get('folder','') for v in config.get('versions',[]) if v.get('folder','')]; print(' '.join(folders))") + current_folders=$(python3 -c "import json; config=json.load(open('branches-config.json')); folders=[v.get('sourceFolder','') for v in config.get('versions',[]) if v.get('sourceFolder','')]; print(' '.join(folders))") echo "πŸ“‹ Removing cloned folders: $current_folders" @@ -219,6 +271,12 @@ jobs: echo "βœ… Cleanup of cloned folders completed!" + - name: Add canonical URLs to MDX files + run: | + echo "🧩 Running canonical URL update script..." + python3 scripts/add-canonical-urls/index.py || { echo "❌ Canonical script failed"; exit 1; } + echo "βœ… Canonical URL update completed." + - name: Verify output run: | echo "πŸ“‹ Checking generated files..." @@ -237,7 +295,38 @@ jobs: echo "πŸ“ Generated file structure:" find . -name "*.mdx" -o -name "*.md" -o -name "*.json" -o -name "*.css" -o -name "*.png" | head -20 + - name: Close previous deployment PRs + run: | + echo "πŸ” Finding and closing previous deployment PRs..." + + # Find PRs that match BOTH criteria: + # 1. Have the "auto-deployment" label + # 2. 
Branch starts with "docs-merge-" + DEPLOYMENT_PRS=$(gh pr list \ + --state open \ + --label "auto-deployment" \ + --json number,headRefName,labels \ + --jq '.[] | select(.headRefName | startswith("docs-merge-")) | .number') + + if [ -n "$DEPLOYMENT_PRS" ]; then + echo "πŸ“‹ Found deployment PRs to close: $DEPLOYMENT_PRS" + + for pr_number in $DEPLOYMENT_PRS; do + echo "❌ Closing deployment PR #$pr_number" + gh pr close "$pr_number" \ + --comment "πŸ€– Superseded by newer deployment (Run #${{ github.run_number }})" \ + || echo "⚠️ Failed to close PR #$pr_number (may already be closed)" + done + + echo "βœ… Closed all previous deployment PRs" + else + echo "βœ… No deployment PRs found to close" + fi + env: + GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + - name: Create Pull Request + id: create-pr uses: peter-evans/create-pull-request@v7 with: token: ${{ secrets.ORG_GH_TOKEN }} @@ -249,9 +338,16 @@ jobs: This PR contains automatically merged documentation from multiple branches. - **Generated from:** `branches-config.json` - **Timestamp:** ${{ github.event.head_commit.timestamp }} - **Run ID:** ${{ github.run_id }} + **Triggered by:** + - **Branch:** ${{ inputs.triggering_branch || 'N/A' }} + - **Commit:** ${{ inputs.triggering_commit_sha || 'N/A' }} + - **PR:** ${{ inputs.triggering_pr_number && format('#{0} - {1}', inputs.triggering_pr_number, inputs.triggering_pr_title) || 'N/A (direct push)' }}${{ inputs.original_pr_number && inputs.original_pr_number != inputs.triggering_pr_number && format(' (cherry-pick of #{0})', inputs.original_pr_number) || '' }} + + **Deployment Details:** + - **Generated from:** `branches-config.json` + - **Run ID:** ${{ github.run_id }} + - **Subfolder:** `${{ inputs.subfolder || '(root deployment)' }}` + - **Timestamp:** $(date) ### Changes Include: - βœ… Merged documentation from multiple branches @@ -259,12 +355,15 @@ jobs: - βœ… Updated assets and content structure - βœ… Cleaned up outdated version folders - ### Subfolder Used: - `${{ 
inputs.subfolder || 'docs3' }}` - --- - Please review the changes and merge when ready. + 🚦 **This PR will be processed by merge queue to ensure proper validation and ordering.** + + Previous deployment PRs have been automatically closed to prevent conflicts. + labels: | + documentation + auto-deployment + automated commit-message: | πŸ€– Auto-merge documentation from branches @@ -279,10 +378,34 @@ jobs: author: ${{ github.actor }} <${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com> committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> + - name: Enable auto-merge on deployment PR + if: steps.create-pr.outputs.pull-request-number + run: | + PR_NUMBER="${{ steps.create-pr.outputs.pull-request-number }}" + echo "οΏ½ Enabling auto-merge on deployment PR #$PR_NUMBER..." + + gh pr merge --squash --auto "$PR_NUMBER" + + echo "βœ… Auto-merge enabled on PR #$PR_NUMBER" + env: + GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + - name: Create deployment summary run: | echo "## πŸ“š Documentation Deployment Summary" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY + echo "### πŸš€ Trigger Information" >> $GITHUB_STEP_SUMMARY + echo "- **Branch:** ${{ inputs.triggering_branch || 'N/A' }}" >> $GITHUB_STEP_SUMMARY + echo "- **Commit:** ${{ inputs.triggering_commit_sha || 'N/A' }}" >> $GITHUB_STEP_SUMMARY + if [ -n "${{ inputs.triggering_pr_number }}" ]; then + echo "- **PR:** #${{ inputs.triggering_pr_number }} - ${{ inputs.triggering_pr_title }}" >> $GITHUB_STEP_SUMMARY + if [ -n "${{ inputs.original_pr_number }}" ] && [ "${{ inputs.original_pr_number }}" != "${{ inputs.triggering_pr_number }}" ]; then + echo "- **Original PR:** #${{ inputs.original_pr_number }} (cherry-picked)" >> $GITHUB_STEP_SUMMARY + fi + else + echo "- **PR:** N/A (direct push)" >> $GITHUB_STEP_SUMMARY + fi + echo "" >> $GITHUB_STEP_SUMMARY echo "### βœ… Successfully merged documentation" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo 
"**Generated files:**" >> $GITHUB_STEP_SUMMARY @@ -294,4 +417,4 @@ jobs: fi echo "" >> $GITHUB_STEP_SUMMARY - echo "**Timestamp:** $(date)" >> $GITHUB_STEP_SUMMARY \ No newline at end of file + echo "**Timestamp:** $(date)" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/eod-report-generator.yaml b/.github/workflows/eod-report-generator.yaml new file mode 100644 index 000000000..beff81698 --- /dev/null +++ b/.github/workflows/eod-report-generator.yaml @@ -0,0 +1,44 @@ +name: EOD Report Generator + +on: + workflow_dispatch: + inputs: + MAIN_REVIEWER: + description: "The GitHub user ID of the primary reviewer whose approval is required for the PR." + required: true + default: "sharadregoti" + START_DATE: + description: "The start date of report (e.g., 2025-02-21T00:00:00.000Z)." + required: true + END_DATE: + description: "The end date of report. Defaults to the current date (e.g., 2025-02-25T00:00:00.000Z)." + required: false + +jobs: + run-script: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' # Change to your required version + + - name: Install dependencies + run: | + cd scripts/eod-report-generator + npm install + + - name: Run script + run: | + cd scripts/eod-report-generator + node index.js + env: + MAIN_REVIEWER: ${{ inputs.MAIN_REVIEWER }} + START_DATE: ${{ inputs.START_DATE }} + END_DATE: ${{ inputs.END_DATE }} + GITHUB_TOKEN: ${{ secrets.TYK_SCRIPTS_TOKEN }} # GitHub Token for API calls + ANTHROPIC_KEY: ${{ secrets.ANTHROPIC_KEY }} # Org key already available diff --git a/.github/workflows/mirror-pr-to-build-deploy.yml b/.github/workflows/mirror-pr-to-build-deploy.yml index 39e8b2170..c750234ed 100644 --- a/.github/workflows/mirror-pr-to-build-deploy.yml +++ b/.github/workflows/mirror-pr-to-build-deploy.yml @@ -28,52 +28,38 @@ jobs: # Create new mirror PR echo "Creating new mirror PR..." 
- # Create PR body content - PR_BODY="**πŸ”— Auto-generated mirror PR for Mintlify preview** + # Create PR body content using printf to avoid shell parsing issues + printf '%s\n' \ + "**πŸ”— Auto-generated mirror PR for Mintlify preview**" \ + "" \ + "**Original PR:** #${{ github.event.number }}" \ + "**Author:** @${{ github.event.pull_request.user.login }}" \ + "" \ + "## Purpose" \ + "This PR provides a Mintlify preview link for reviewing documentation changes." \ + "" \ + "## Preview Link" \ + "The Mintlify preview will be available once this PR is processed." \ + "" \ + "## ⚠️ Important Notes" \ + "- **Do not merge this PR directly**" \ + "- This PR will be auto-merged when the original PR #${{ github.event.number }} is merged" \ + "- Make all comments and reviews on the original PR #${{ github.event.number }}" \ + "" \ + "## Changes" \ + "${{ github.event.pull_request.body }}" > pr_body.txt - **Original PR:** #${{ github.event.number }} - **Author:** @${{ github.event.pull_request.user.login }} + # Escape the title properly to handle special characters and spaces + ESCAPED_TITLE=$(printf '%s' "πŸ”„ Preview: ${{ github.event.pull_request.title }}" | sed 's/"/\\"/g') - ## Purpose - This PR provides a Mintlify preview link for reviewing documentation changes. - - ## Preview Link - The Mintlify preview will be available once this PR is processed. 
- - ## ⚠️ Important Notes - - **Do not merge this PR directly** - - This PR will be auto-merged when the original PR #${{ github.event.number }} is merged - - Make all comments and reviews on the original PR #${{ github.event.number }} - - ## Changes - ${{ github.event.pull_request.body }}" - gh pr create \ --base production \ - --head ${{ github.head_ref }} \ - --title "πŸ”„ Preview: ${{ github.event.pull_request.title }}" \ - --body "$PR_BODY" + --head "${{ github.head_ref }}" \ + --title "$ESCAPED_TITLE" \ + --body-file pr_body.txt \ + --draft echo "βœ… Mirror PR created successfully" - - # Get the mirror PR number that was just created - MIRROR_PR=$(gh pr list --base production --head ${{ github.head_ref }} --json number --jq '.[0].number // empty') - - # Comment on the original PR with link to mirror PR - if [ -n "$MIRROR_PR" ]; then - COMMENT_BODY="πŸ”— **Preview Link Available** - - This PR has an auto-generated mirror for Mintlify preview: - πŸ‘‰ **[View Preview PR #$MIRROR_PR](https://github.com/${{ github.repository }}/pull/$MIRROR_PR)** - - The Mintlify preview link will appear on the mirror PR once it's processed. - - --- - *This is an automated comment. 
All discussion should happen on this PR, not the mirror PR.*" - - gh pr comment ${{ github.event.number }} --body "$COMMENT_BODY" - echo "βœ… Comment added to original PR with mirror PR link" - fi else echo "πŸ”„ Mirror PR #$MIRROR_PR already exists and will be auto-updated" fi diff --git a/.github/workflows/pr_agent.yaml b/.github/workflows/pr_agent.yaml new file mode 100644 index 000000000..57f0b0a5c --- /dev/null +++ b/.github/workflows/pr_agent.yaml @@ -0,0 +1,20 @@ +on: + pull_request: + types: + - opened + - reopened + - synchronize + - ready_for_review + issue_comment: +jobs: + pr_agent_job: + runs-on: ubuntu-latest + name: Run pr agent on every pull request, respond to user comments + if: ${{ !github.event.pull_request.draft }} + steps: + - name: PR Agent action step + id: pragent + uses: Codium-ai/pr-agent@main + env: + OPENAI_KEY: ${{ secrets.OPENAI_KEY }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/probe-writer.yaml b/.github/workflows/probe-writer.yaml new file mode 100644 index 000000000..5e2493da5 --- /dev/null +++ b/.github/workflows/probe-writer.yaml @@ -0,0 +1,24 @@ +name: Probe Writer handler + +on: + issue_comment: + types: [created] + +# Define permissions needed for the workflow +permissions: + issues: write + pull-requests: write + contents: write + +jobs: + trigger_probe_implement: + uses: buger/probe/.github/workflows/probe.yml@main + with: + command_prefix: "/writer" # Or '/ai', '/ask', etc. 
+ allow_edit: true + prompt: .github/.probe/writer.txt + secrets: + ANTHROPIC_API_KEY: ${{ secrets.PROBE_ANTHROPIC_API_KEY }} + ANTHROPIC_API_URL: ${{ secrets.PROBE_ANTHROPIC_URL }} + APP_ID: ${{ secrets.PROBE_APP_ID }} + APP_PRIVATE_KEY: ${{ secrets.PROBE_APP_PRIVATE_KEY }} \ No newline at end of file diff --git a/.github/workflows/probe.yaml b/.github/workflows/probe.yaml new file mode 100644 index 000000000..965a2923e --- /dev/null +++ b/.github/workflows/probe.yaml @@ -0,0 +1,37 @@ +name: Probe handler + +on: + pull_request: + types: [opened] #[opened , labeled] + issue_comment: + types: [created] + issues: + types: [opened] #[opened, labeled] + +# Define permissions needed for the workflow +permissions: + issues: write + pull-requests: write + contents: read + +jobs: + trigger_probe_chat: + # Uncomment if you want to run on on specific lables, in this example `probe` + # if: | + # (github.event_name == 'pull_request' && github.event.action == 'opened') || + # (github.event_name == 'issues' && github.event.action == 'opened') || + # (github.event_name == 'issue_comment' && github.event.action == 'created') || + # ((github.event_name == 'pull_request' || github.event_name == 'issues') && + # github.event.action == 'labeled' && github.event.label.name == 'probe') + # Use the reusable workflow from your repository (replace and ) + uses: buger/probe/.github/workflows/probe.yml@main + # Pass required inputs + with: + command_prefix: "/probe" # Or '/ai', '/ask', etc. + # Optionally override the default npx command if the secret isn't set + # default_probe_chat_command: 'node path/to/custom/script.js' + # Pass ALL secrets from this repository to the reusable workflow + # This includes GITHUB_TOKEN, PROBE_CHAT_COMMAND (if set), ANTHROPIC_API_KEY, etc. 
+ secrets: + ANTHROPIC_API_KEY: ${{ secrets.PROBE_ANTHROPIC_API_KEY }} + ANTHROPIC_API_URL: ${{ secrets.PROBE_ANTHROPIC_URL }} diff --git a/.github/workflows/release-bot.yaml b/.github/workflows/release-bot.yaml new file mode 100644 index 000000000..748500b88 --- /dev/null +++ b/.github/workflows/release-bot.yaml @@ -0,0 +1,189 @@ +name: Cherry-pick to Release Branch + +on: + issue_comment: + types: [created] + workflow_call: + +jobs: + cherry_pick: + runs-on: ubuntu-latest + steps: + - name: Check for release command + id: check_command + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const { issue, comment } = context.payload; + if (!issue || !issue.pull_request || !comment || !comment.body.startsWith('/release to ')) { + core.setOutput('release_valid', 'false'); + return; + } + const releaseBranch = comment.body.split('/release to ')[1].trim(); + core.setOutput('release_valid', 'true'); + core.setOutput('release_branch', releaseBranch); + core.setOutput('pr_number', issue.number); + + - name: Check admin permissions + if: steps.check_command.outputs.release_valid == 'true' + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const username = context.payload.comment.user.login; + const authorAssociation = context.payload.comment.author_association; + + // Quick check: Repository owner always allowed + if (authorAssociation === 'OWNER') { + console.log(`βœ… User ${username} is repository owner`); + return; + } + + // Check for admin permission + try { + const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({ + owner: context.repo.owner, + repo: context.repo.repo, + username: username + }); + + if (permission.permission !== 'admin') { + core.setFailed(`❌ Only repository admins can use /release command. 
User ${username} has: ${permission.permission}`); + return; + } + + console.log(`βœ… User ${username} has admin permissions`); + } catch (error) { + core.setFailed(`❌ Permission check failed: ${error.message}`); + } + + - name: Install GitHub CLI (for act/local testing) + if: steps.check_command.outputs.release_valid == 'true' + run: | + sudo apt update + sudo apt install -y curl unzip gnupg + curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo tee /usr/share/keyrings/githubcli-archive-keyring.gpg >/dev/null + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null + sudo apt update + sudo apt install -y gh + + - name: Checkout repository + if: steps.check_command.outputs.release_valid == 'true' + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set default branch variable + if: steps.check_command.outputs.release_valid == 'true' + run: echo "DEFAULT_BRANCH=${{ github.event.repository.default_branch }}" >> $GITHUB_ENV + + - name: Skip jobs if not a valid release command + if: steps.check_command.outputs.release_valid == 'false' + run: echo "Skipping cherry-pick as the release command is not valid." 
+ continue-on-error: true + + - name: Setup Git + if: steps.check_command.outputs.release_valid == 'true' + run: | + git config --global user.email "bot@tyk.io" + git config --global user.name "Tyk Bot" + + - name: Get PR base and merge SHAs + id: pr_details + if: steps.check_command.outputs.release_valid == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_NUMBER=${{ steps.check_command.outputs.pr_number }} + MERGE_COMMIT=$(gh pr view "$PR_NUMBER" --json mergeCommit --jq '.mergeCommit.oid // empty') + BASE_SHA=$(gh pr view "$PR_NUMBER" --json baseRefOid --jq '.baseRefOid // empty') + echo "MERGE_COMMIT=$MERGE_COMMIT" >> $GITHUB_ENV + echo "BASE_SHA=$BASE_SHA" >> $GITHUB_ENV + echo "MERGE_COMMIT=$MERGE_COMMIT" >> $GITHUB_OUTPUT + echo "BASE_SHA=$BASE_SHA" >> $GITHUB_OUTPUT + + - name: Cherry-pick PR into release branch + id: cherry_pick + if: steps.check_command.outputs.release_valid == 'true' + env: + GITHUB_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + GITHUB_REPO: ${{ github.repository }} + GITHUB_BRANCH: ${{ steps.check_command.outputs.release_branch }} + run: | + export FOLDER=$(basename "$GITHUB_REPO") + rm -rf $FOLDER + git clone https://x-access-token:$GITHUB_TOKEN@github.com/$GITHUB_REPO + cd $FOLDER + + git checkout $GITHUB_BRANCH + git pull + + NEW_BRANCH=merge/$GITHUB_BRANCH/$MERGE_COMMIT + git branch -D $NEW_BRANCH 2>/dev/null || true + REMOTE_EXISTS=$(git ls-remote --heads origin $NEW_BRANCH | wc -l) + [ "$REMOTE_EXISTS" -gt 0 ] && git push origin --delete $NEW_BRANCH || true + + git checkout -b $NEW_BRANCH + + MERGE_FAILED=0 + git cherry-pick -x $BASE_SHA..$MERGE_COMMIT || MERGE_FAILED=$? 
+ + git diff --quiet origin/$GITHUB_BRANCH HEAD && { + echo "No changes to cherry-pick" + echo "PR_URL=" >> $GITHUB_OUTPUT + echo "MERGE_FAILED=0" >> $GITHUB_OUTPUT + exit 0 + } + + git push origin $NEW_BRANCH --force + + TITLE=$(git log --format=%s -n 1 $MERGE_COMMIT) + BODY=$(git log --format=%B -n 1 $MERGE_COMMIT) + + PR_URL=$(gh pr create \ + --title "Merging to $GITHUB_BRANCH: $TITLE" \ + --body "$BODY" \ + --repo $GITHUB_REPO \ + --base $GITHUB_BRANCH \ + --head $NEW_BRANCH \ + $( [ "$MERGE_FAILED" -ne 0 ] && echo "--draft" )) + + echo "PR_URL=$PR_URL" >> $GITHUB_OUTPUT + echo "MERGE_FAILED=$MERGE_FAILED" >> $GITHUB_OUTPUT + + if [ "$MERGE_FAILED" -eq 0 ]; then + if [[ "$PR_URL" =~ /pull/([0-9]+) ]]; then + PR_NUMBER="${BASH_REMATCH[1]}" + gh pr merge --squash "$PR_NUMBER" --auto --subject "Merging to $GITHUB_BRANCH: $TITLE" --body "$BODY" || echo "Auto-merge failed" + fi + fi + + - name: Comment back on original PR + if: steps.check_command.outputs.release_valid == 'true' && always() + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const prUrl = '${{ steps.cherry_pick.outputs.PR_URL }}'; + const mergeFailed = '${{ steps.cherry_pick.outputs.MERGE_FAILED }}' === '1'; + let body; + + if ('${{ job.status }}' === 'success') { + if (mergeFailed) { + body = `⚠️ Cherry-pick completed with conflicts. A draft PR was created: ${prUrl}`; + } else if (prUrl) { + body = `βœ… Cherry-pick successful. A PR was created and auto-merged (if allowed): ${prUrl}`; + } else { + body = `ℹ️ Cherry-pick skipped: no changes needed in target branch.`; + } + } else { + body = '❌ Cherry-pick failed. 
Please check the workflow logs.'; + } + + github.rest.issues.createComment({ + issue_number: ${{ steps.check_command.outputs.pr_number }}, + owner: context.repo.owner, + repo: context.repo.repo, + body: body + }); diff --git a/.github/workflows/release-to-branches-with-label.yml b/.github/workflows/release-to-branches-with-label.yml new file mode 100644 index 000000000..0c9e6e5e0 --- /dev/null +++ b/.github/workflows/release-to-branches-with-label.yml @@ -0,0 +1,104 @@ +name: On Pull Request Merged to Master + +on: + pull_request: + # Only trigger on pull requests targeting main/master + branches: + - master + - main + types: + - closed + +jobs: + run-on-pr-merged: + runs-on: ubuntu-latest + + # Only run if the PR was actually merged + if: ${{ github.event.pull_request.merged == true }} + + steps: + - name: Add a comment to the merged PR (only if labeler is in the org) + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.ORG_GH_TOKEN }} + script: | + // 1. The label format: e.g., "release-1", "release-1.0" + const labelRegex = /^release-[0-9]+(\.[0-9]+)?$/; + + // 2. Get PR info + const pullRequestNumber = context.payload.pull_request.number; + const labelsOnPR = context.payload.pull_request.labels || []; + console.log("Labels on the Pull Request:", labelsOnPR.map(label => label.name)); + // 3. Get all timeline events to see who labeled the PR + const { data: prEvents } = await github.rest.issues.listEvents({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pullRequestNumber + }); + + // 4. Filter down to "labeled" events + const labeledEvents = prEvents.filter(ev => ev.event === 'labeled'); + console.log("Labeled Events:",labeledEvents.map(event => ({ + label: event.label?.name, + user: event.actor?.login, + timestamp: event.created_at + }))); + // 5. 
Build a map: labelName -> last user who added it + // (We reverse to get the *most recent* labeler, if a label was added multiple times) + const labelToLastLabeler = {}; + for (const event of labeledEvents.reverse()) { + const labelName = event.label?.name; + const userName = event.actor?.login; + if (labelName && userName && !labelToLastLabeler[labelName]) { + labelToLastLabeler[labelName] = userName; + } + } + + // 6. For each label on the PR, check if it matches "release-.." + // If yes, we see who labeled it last and check their membership + for (const label of labelsOnPR) { + if (labelRegex.test(label.name)) { + const userWhoAddedLabel = labelToLastLabeler[label.name]; + + // If there's no recorded user (edge case), skip + if (!userWhoAddedLabel) { + console.log(`User not found for label: ${label.name}`); + continue; + } + + // 7. Check if the user is in the org + let isMember = false; + try { + await github.rest.orgs.checkMembershipForUser({ + org: 'TykTechnologies', + username: userWhoAddedLabel + }); + // If this call succeeds, they're a member + isMember = true; + console.log(`User '${userWhoAddedLabel}' is a member of the organization '${'TykTechnologies'}'.`); + } catch (error) { + // If 404, user is not a member. Anything else is an unexpected error. + if (error.status === 404) { + console.log(`User '${userWhoAddedLabel}' is NOT a member of the organization '${'TykTechnologies'}'.`); + }else { + console.error(`An error occurred while checking membership for user '${userWhoAddedLabel}':`, error); + throw error; + } + } + + // 8. 
Comment only if user is in the org + if (isMember) { + console.log(`Creating comment for label '${label.name}' on PR #${pullRequestNumber} by user '${userWhoAddedLabel}'.`); + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pullRequestNumber, + body: `/release to ${label.name}` + }); + }else{ + console.log(`No comment created for label '${label.name}' on PR #${pullRequestNumber} because the user '${userWhoAddedLabel}' is not a member of the organization 'TykTechnologies'.`); + } + }else{ + console.log(`Label '${label.name}' does not match the expected format.`); + } + } \ No newline at end of file diff --git a/.github/workflows/site-content-analysis.yml b/.github/workflows/site-content-analysis.yml new file mode 100644 index 000000000..498b548fc --- /dev/null +++ b/.github/workflows/site-content-analysis.yml @@ -0,0 +1,162 @@ +name: Site Content Analysis + +on: + workflow_dispatch: + inputs: + wait_time: + description: 'Wait time per page (seconds)' + required: false + default: '3' + type: string + timeout: + description: 'Timeout per page (seconds)' + required: false + default: '30' + type: string + +jobs: + analyze-site-content: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: production + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - name: Install system dependencies + run: | + sudo apt-get update + # Install essential packages for headless Chrome + sudo apt-get install -y \ + ca-certificates \ + fonts-liberation \ + libnss3 \ + lsb-release \ + xdg-utils \ + wget \ + gnupg + + # Install Chrome dependencies (with fallbacks for different Ubuntu versions) + sudo apt-get install -y \ + libatk1.0-0 \ + libatk-bridge2.0-0 \ + libcups2 \ + libdrm2 \ + libgtk-3-0 \ + libgtk-4-1 \ + libxcomposite1 \ + libxdamage1 \ + libxrandr2 \ + libgbm1 \ + libxss1 \ + libasound2 || \ + sudo apt-get install -y \ + 
libatk1.0-0 \ + libatk-bridge2.0-0 \ + libcups2 \ + libdrm2 \ + libgtk-3-0 \ + libxcomposite1 \ + libxdamage1 \ + libxrandr2 \ + libgbm1 \ + libxss1 \ + libasound2t64 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install pyppeteer beautifulsoup4 + + - name: Download Chromium for Pyppeteer + run: | + python -c "import asyncio; from pyppeteer import launch; asyncio.get_event_loop().run_until_complete(launch())" + + - name: Run site content analysis + run: | + python scripts/browser_site_analyzer.py \ + --base-url https://tyk.mintlify.app \ + --docs-json docs.json \ + --output-dir site_analysis_output \ + --report-file site_analysis_report.json \ + --wait-time ${{ github.event.inputs.wait_time || '3' }} \ + --timeout ${{ github.event.inputs.timeout || '30' }} + + - name: Create summary comment + if: always() + run: | + echo "## πŸ” Site Content Analysis Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Analysis completed for:** https://tyk.mintlify.app" >> $GITHUB_STEP_SUMMARY + echo "**Wait time:** ${{ github.event.inputs.wait_time || '3' }} seconds" >> $GITHUB_STEP_SUMMARY + echo "**Timeout:** ${{ github.event.inputs.timeout || '30' }} seconds" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + if [ -f site_analysis_report.json ]; then + echo "### Summary Statistics" >> $GITHUB_STEP_SUMMARY + echo '```json' >> $GITHUB_STEP_SUMMARY + python -c " + import json + with open('site_analysis_report.json', 'r') as f: + data = json.load(f) + summary = data['summary'] + print(f'Total pages analyzed: {summary[\"total_pages_analyzed\"]}') + print(f'Pages with sufficient content: {summary[\"pages_with_sufficient_content\"]}') + print(f'Pages with empty/insufficient content: {summary[\"pages_with_empty_content\"]}') + print(f'Browser failures: {summary[\"browser_failures\"]}') + " >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + # Show 
problematic pages if any + python -c " + import json + with open('site_analysis_report.json', 'r') as f: + data = json.load(f) + if data['empty_pages']: + print('### ❌ Pages with Content Issues') + for page in data['empty_pages'][:10]: # Show first 10 + print(f'- **{page[\"url\"]}**: {page[\"issues\"][0] if page[\"issues\"] else \"Unknown issue\"}') + if len(data['empty_pages']) > 10: + print(f'- ... and {len(data[\"empty_pages\"]) - 10} more pages') + " >> $GITHUB_STEP_SUMMARY + else + echo "❌ Analysis failed - no report generated" >> $GITHUB_STEP_SUMMARY + fi + + echo "" >> $GITHUB_STEP_SUMMARY + echo "πŸ“„ **All analysis results are displayed above**" >> $GITHUB_STEP_SUMMARY + + - name: Fail job if critical issues found + if: always() + run: | + if [ -f site_analysis_report.json ]; then + python -c " + import json, sys + with open('site_analysis_report.json', 'r') as f: + data = json.load(f) + summary = data['summary'] + total = summary['total_pages_analyzed'] + empty = summary['pages_with_empty_content'] + + if total > 0: + empty_percentage = (empty / total) * 100 + print(f'Empty content percentage: {empty_percentage:.1f}%') + + # Fail if more than 20% of pages have empty content + if empty_percentage > 20: + print(f'❌ CRITICAL: {empty_percentage:.1f}% of pages have empty content (threshold: 20%)') + sys.exit(1) + else: + print(f'βœ… Content quality acceptable: {empty_percentage:.1f}% empty pages') + else: + print('❌ CRITICAL: No pages were analyzed') + sys.exit(1) + " + fi diff --git a/.github/workflows/trigger-docs-deploy.yml b/.github/workflows/trigger-docs-deploy.yml new file mode 100644 index 000000000..49a5c5420 --- /dev/null +++ b/.github/workflows/trigger-docs-deploy.yml @@ -0,0 +1,140 @@ +name: Trigger Documentation Deployment + +on: + push: + branches: + - main + - 'release-*' + +jobs: + trigger-deploy: + runs-on: ubuntu-latest + steps: + - name: Find the actual merged PR + id: get-pr + run: | + COMMIT_SHA="${{ github.sha }}" + COMMIT_MESSAGE="${{ 
github.event.head_commit.message }}" + echo "Looking for PR that was merged with commit: $COMMIT_SHA" + echo "Commit message: $COMMIT_MESSAGE" + + # Search for PRs that were merged with this exact commit SHA + echo "Searching for merged PR with commit SHA: $COMMIT_SHA" + + # Use a temporary file to avoid shell parsing issues + curl -s \ + -H "Authorization: token ${{ secrets.ORG_GH_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls?state=closed&sort=updated&direction=desc&per_page=50" > /tmp/prs.json + + # Find PR with matching merge commit SHA + ACTUAL_PR_NUMBER=$(jq -r --arg commit "$COMMIT_SHA" '.[] | select(.merge_commit_sha == $commit) | .number' /tmp/prs.json | head -1) + + if [ -n "$ACTUAL_PR_NUMBER" ] && [ "$ACTUAL_PR_NUMBER" != "null" ]; then + # Get PR details + ACTUAL_PR_TITLE=$(jq -r --arg commit "$COMMIT_SHA" '.[] | select(.merge_commit_sha == $commit) | .title' /tmp/prs.json | head -1) + PR_AUTHOR=$(jq -r --arg commit "$COMMIT_SHA" '.[] | select(.merge_commit_sha == $commit) | .user.login' /tmp/prs.json | head -1) + + echo "Found actual merged PR: #$ACTUAL_PR_NUMBER - $ACTUAL_PR_TITLE by $PR_AUTHOR" + echo "pr_number=$ACTUAL_PR_NUMBER" >> $GITHUB_OUTPUT + echo "pr_title=$ACTUAL_PR_TITLE" >> $GITHUB_OUTPUT + echo "pr_author=$PR_AUTHOR" >> $GITHUB_OUTPUT + echo "has_pr=true" >> $GITHUB_OUTPUT + + # Also try to extract original PR from commit message for context + ORIGINAL_PR=$(echo "$COMMIT_MESSAGE" | grep -oE '#[0-9]+' | head -1 | sed 's/#//') + if [ -n "$ORIGINAL_PR" ] && [ "$ORIGINAL_PR" != "$ACTUAL_PR_NUMBER" ]; then + echo "Found original PR reference in commit: #$ORIGINAL_PR" + echo "original_pr_number=$ORIGINAL_PR" >> $GITHUB_OUTPUT + echo "has_original_pr=true" >> $GITHUB_OUTPUT + else + echo "has_original_pr=false" >> $GITHUB_OUTPUT + fi + else + echo "No merged PR found via API, falling back to commit message parsing..." 
+ + # Fallback to original method + PR_NUMBER=$(echo "$COMMIT_MESSAGE" | grep -oE '#[0-9]+' | head -1 | sed 's/#//') + + if [ -n "$PR_NUMBER" ]; then + echo "Found PR number in commit message: #$PR_NUMBER" + echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT + echo "has_pr=true" >> $GITHUB_OUTPUT + + # Extract PR title (everything before the PR number reference) + PR_TITLE=$(echo "$COMMIT_MESSAGE" | sed 's/ (#[0-9]\+).*//' | head -1) + echo "pr_title=$PR_TITLE" >> $GITHUB_OUTPUT + echo "has_original_pr=false" >> $GITHUB_OUTPUT + else + echo "No PR number found in commit message (direct push)" + echo "has_pr=false" >> $GITHUB_OUTPUT + echo "has_original_pr=false" >> $GITHUB_OUTPUT + fi + fi + + # Clean up temp file + rm -f /tmp/prs.json + + - name: Trigger production deployment + run: | + # Prepare the dispatch payload + if [ "${{ steps.get-pr.outputs.has_pr }}" = "true" ]; then + if [ "${{ steps.get-pr.outputs.has_original_pr }}" = "true" ]; then + # Include both actual PR and original PR + PAYLOAD=$(jq -n \ + --arg ref "production" \ + --arg pr_number "${{ steps.get-pr.outputs.pr_number }}" \ + --arg pr_title "${{ steps.get-pr.outputs.pr_title }}" \ + --arg original_pr_number "${{ steps.get-pr.outputs.original_pr_number }}" \ + --arg commit_sha "${{ github.sha }}" \ + --arg branch "${{ github.ref_name }}" \ + '{ + ref: $ref, + inputs: { + triggering_pr_number: $pr_number, + triggering_pr_title: $pr_title, + original_pr_number: $original_pr_number, + triggering_commit_sha: $commit_sha, + triggering_branch: $branch + } + }') + echo "Triggering deployment with PR #${{ steps.get-pr.outputs.pr_number }} (original: #${{ steps.get-pr.outputs.original_pr_number }})" + else + # Only actual PR + PAYLOAD=$(jq -n \ + --arg ref "production" \ + --arg pr_number "${{ steps.get-pr.outputs.pr_number }}" \ + --arg pr_title "${{ steps.get-pr.outputs.pr_title }}" \ + --arg commit_sha "${{ github.sha }}" \ + --arg branch "${{ github.ref_name }}" \ + '{ + ref: $ref, + inputs: { + 
triggering_pr_number: $pr_number, + triggering_pr_title: $pr_title, + triggering_commit_sha: $commit_sha, + triggering_branch: $branch + } + }') + echo "Triggering deployment with PR #${{ steps.get-pr.outputs.pr_number }}" + fi + else + PAYLOAD=$(jq -n \ + --arg ref "production" \ + --arg commit_sha "${{ github.sha }}" \ + --arg branch "${{ github.ref_name }}" \ + '{ + ref: $ref, + inputs: { + triggering_commit_sha: $commit_sha, + triggering_branch: $branch + } + }') + echo "Triggering deployment (direct push, no PR)" + fi + + curl -X POST \ + -H "Authorization: token ${{ secrets.ORG_GH_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + https://api.github.com/repos/${{ github.repository }}/actions/workflows/deploy-docs.yml/dispatches \ + -d "$PAYLOAD" diff --git a/.github/workflows/validate-docs.yml b/.github/workflows/validate-docs.yml new file mode 100644 index 000000000..917dbd411 --- /dev/null +++ b/.github/workflows/validate-docs.yml @@ -0,0 +1,33 @@ +name: Validate Documentation + +on: + pull_request: + +jobs: + validate: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + + - name: Validate documentation + run: | + echo "πŸ” Running documentation validation..." + python scripts/validate_mintlify_docs.py . --validate-redirects --verbose + + - name: Validation complete + if: success() + run: | + echo "βœ… Documentation validation passed!" + echo "All links, images, navigation, and redirects are valid." 
diff --git a/.vale.ini b/.vale.ini new file mode 100644 index 000000000..8e999c8ff --- /dev/null +++ b/.vale.ini @@ -0,0 +1,10 @@ +MinAlertLevel = suggestion + +Packages = MDX, Google, write-good + + +[*] +BasedOnStyles = Vale, Google, write-good, mintlify-poc + +# Define your vocabulary +Vocab = mintlify-poc diff --git a/.vale/styles/Vocab/mintlify-poc/accept.txt b/.vale/styles/Vocab/mintlify-poc/accept.txt new file mode 100644 index 000000000..39201a520 --- /dev/null +++ b/.vale/styles/Vocab/mintlify-poc/accept.txt @@ -0,0 +1,43 @@ +Tyk +Tyk's +Coprocess +OTel +API's +CVEs +JWTs +proxying +keyspace +Distroless +brotli +mutex +Subgraph +subgraph +Subgraphs +subgraphs +supergraph +supergraphs +build_id +build_ids +enums +allOf +anyOf +oneOf +allowedIPs +dataplanes +commonName +datasource +upstreams +Typename +Fieldname +mdcb +failover +orgs +misconfigured +Hashicorp +Sarama +detailed_tracing +reconnections +rawResponse +ruc +sqlite +wget \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 000000000..c8a8b87cd --- /dev/null +++ b/README.md @@ -0,0 +1,15 @@ +![https://tyk.io/docs/img/Tyk-Docs-logo-Dark.svg "Tyk Docs"](https://tyk.io/docs/img/Tyk-Docs-logo-Dark.svg) + + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/tyktechnologies/tyk-docs/ci.yaml?color=20EDBA&label=Hugo%20Build&logo=Tyk&logoColor=8438FA&style=plastic)](https://github.com/TykTechnologies/tyk-docs/actions/workflows/ci.yaml) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/tyktechnologies/tyk-docs/htmltest.yaml?color=20EDBA&label=HTML%20test&logo=Tyk&logoColor=8438FA&style=plastic)](https://github.com/TykTechnologies/tyk-docs/blob/master/.github/workflows/htmltest.yaml) +[![GitHub Workflow 
Status](https://img.shields.io/github/actions/workflow/status/tyktechnologies/tyk-docs/docsearch.yaml?color=20EDBA&label=Indexing%20Search&logo=Tyk&logoColor=8438FA&style=plastic)](https://github.com/TykTechnologies/tyk-docs/actions/workflows/docsearch.yaml) +![Netlify Status](https://img.shields.io/netlify/bd4edbca-27c7-4286-bc69-5a644092d6d2?color=20EDBA&label=Netlify%20Status&logo=Tyk&logoColor=8438FA&style=plastic) + + +# Tyk Documentation + +This repository is the source of the official [Tyk Documentation website](https://tyk.io/docs/). + +## How to Contribute +For issues and contribution, check our [contribution guide](https://tyk.io/docs/developer-support/contribution-guides) and our [release guide](https://tyk.io/docs/developer-support/release-guide) diff --git a/advanced-configuration/other-protocols.mdx b/advanced-configuration/other-protocols.mdx new file mode 100644 index 000000000..7b3a23936 --- /dev/null +++ b/advanced-configuration/other-protocols.mdx @@ -0,0 +1,30 @@ +--- +title: "Advanced Communication Protocols" +description: "How to configure advanced communication protocols" +keywords: "gRPC, SSE, Websocket, Other Protocol" +sidebarTitle: "Overview" +--- + +## Overview + +Tyk API Gateway is primarily designed to handle HTTP/HTTPS traffic, but it also provides robust support for several other protocols to accommodate modern API architectures and communication patterns. This flexibility allows developers to leverage Tyk's security, monitoring, and management capabilities across a wider range of API technologies. + +### Use Cases + +These advanced protocol capabilities enhance Tyk's usefulness beyond traditional REST APIs: + +- **Real-time Applications**: WebSockets enable bidirectional communication for chat applications, collaborative tools, and live dashboards. +- **Microservices Communication**: gRPC support facilitates efficient inter-service communication with strong typing and performance benefits. 
+- **Event-Driven Architectures**: SSE enables efficient server-push notifications without the overhead of maintaining WebSocket connections.
+
+## Supported Protocols
+
+Tyk currently supports the following protocols other than HTTP/HTTPS:
+
+1. **[TCP Proxy](/key-concepts/tcp-proxy)**
+
+2. **[gRPC](/key-concepts/grpc-proxy)**
+
+3. **[Server-Sent Events (SSE)](/advanced-configuration/sse-proxy)**
+
+4. **[WebSockets](/advanced-configuration/websockets)**
\ No newline at end of file
diff --git a/advanced-configuration/sse-proxy.mdx b/advanced-configuration/sse-proxy.mdx
new file mode 100644
index 000000000..ca3eaa959
--- /dev/null
+++ b/advanced-configuration/sse-proxy.mdx
@@ -0,0 +1,50 @@
+---
+title: "Server-Sent Events Proxy"
+description: "Describes how you can use Tyk as a simple Server-Sent Events Proxy"
+keywords: "SSE Proxy, Other Protocol"
+sidebarTitle: "Server-Sent Events"
+---
+
+[Server-Sent Events](https://en.wikipedia.org/wiki/Server-sent_events) (SSE) is a server push technology enabling a subscribed client to receive automatic updates from a server via a long-running HTTP connection.
+Unlike WebSockets, SSE is a one-way communication from server to clients (WebSockets is a bidirectional communication between server and client).
+As such, if you only need clients to receive data from a server, and don't require them to send messages back, SSE could be a simpler way to make that happen. Online stock quotes, notifications, and feeds are good examples of applications that use SSE.
+
+## Using Tyk as a server-sent events (SSE) Proxy
+
+Tyk Gateway supports SSE proxying over HTTP, and can sit in the middle between the client and the SSE server and support the server sending updates to the client.
+
+### Setup
+- Enable SSE support on the Gateway: Set `http_server_options.enable_websockets` to `true` in your Tyk Gateway config file.
+- To maintain an open connection between the API consumer and the Tyk Gateway, set `http_server_options.read_timeout` and `http_server_options.write_timeout` to appropriately high values (in milliseconds). For example, you could try setting both to `2000`, but this is for you to determine in your environment. +- Set `http_server_options.flush_interval` to an appropriate value, e.g. `1`, to force Tyk to stream the response to the client every `n` seconds. + + +### Example using Tyk as an SSE proxy +For this we will need: + +* An SSE server. For this example we will use [Golang HTML 5 SSE example](https://github.com/kljensen/golang-html5-sse-example) +* An instance of the Tyk Gateway and optionally the Tyk Dashboard + +**Steps for Configuration:** +* Ensure the Gateway configurations detailed in the Setup section are set. +* Run the SSE server as per the example instructions. By default this runs on port `8000`. +``` +go run ./server.go +``` +* Publish an API with the following configuration: + 1. Set an appropriate listen path, e.g. `"listen_path": "/sse"` + 2. Strip the listen path, e.g. `"strip_listen_path": true,` + 3. Set the target url as the SSE server, e.g. the example SSE server:`"target_url": "http://host.docker.internal:8000"` + 4. Click Save, and wait for the Gateway to reload the API before testing it +* To test the protected SSE service via the API in the Tyk Gateway run: +```bash +curl http://localhost:8080/sse/events/ +``` +You should see a stream of updates from the server. 
In this example, you will see:
+
+```bash
+Message: 20 - the time is 2013-03-08 21:08:01.260967 -0500 EST
+Message: 21 - the time is 2013-03-08 21:08:06.262034 -0500 EST
+Message: 22 - the time is 2013-03-08 21:08:11.262608 -0500 EST
+```
+
diff --git a/advanced-configuration/transform-traffic/looping.mdx b/advanced-configuration/transform-traffic/looping.mdx
new file mode 100644
index 000000000..30deeacc0
--- /dev/null
+++ b/advanced-configuration/transform-traffic/looping.mdx
@@ -0,0 +1,103 @@
+---
+title: "Looping"
+description: "Learn how to use looping in Tyk's URL Rewriting to redirect requests internally within the gateway for improved performance and flexibility."
+order: 5
+sidebarTitle: "What is Internal Looping?"
+---
+
+## Overview
+
+If you need to redirect your URL to *another endpoint* in the API or *another API in the gateway* using [URL Rewriting](/transform-traffic/url-rewriting#url-rewrite-middleware), you can run the request pipeline one more time, internally, instead of redirecting it to an HTTP endpoint through the network. This is called looping. This is very performant because Tyk will not make another network call when a loop is detected.
+
+In order to specify a loop, in the target URL you specify a string in the protocol schema `tyk://` as shown below:
+
+This syntax of `tyk` in the schema protocol and `self` in the domain will loop the request to another endpoint under the current API:
+```
+tyk://self/
+```
+
+You can also loop to another API by specifying the API name or id (instead of `self`):
+```
+tyk://<api_id>/<path>
+```
+
+Combined with our advanced URL rewriter rules, it can be turned into a powerful logical block, replacing the need for writing middleware or virtual endpoints in many cases.
+
+
+## Example Use Cases
+
+### Multiple Auth Types for a single API
+
+Imagine you have a legacy API that has existing authentication strategies. We can pretend it's using basic authentication.
You've decided to bring this API into your APIM ecosystem, and also begin to use OAuth2 for your API. But also we need to support existing users who have basic auth credentials. Finally, it's important that we expose a single ingress to our customers for that one API, instead of multiple listen paths for each authentication type. + +We can use looping to achieve this. We can set up triggers in URL Rewrite plugin, where based off a specific header, Tyk will loop the request to a specific API. + +For example, let's see the following use case: +Looping example + +#### 1. A request comes into the ingress API. This has two rules: +- If `Header == "Authorization: Bearer"`, loop to the OAuth API +- If `Header == "Authorization: Basic"`, loop to the Basic Auth API + +1. The ingress API is marked "keyless" as Tyk doesn't perform any authentication here. +2. We add rate limiting option to the loop via `?check_limits=true` + +#### 2. The inner APIs perform authentication, and loop to the north-bound API + +These APIs are marked internal, can only be accessed from within loop context. + +#### 3. The north-bound API, marked open keyless, does transformations, etc, then reverse proxies to the backend API. + +1. This API is marked internal, can only be accessed from within loop context. +2. This API is marked "keyless" as Tyk doesn't perform any authentication here. + +## Advanced Configuration + +You can add one or more of the following configurations as query parameters to your looping URL. + +### Rate Limiting in looping + +In looping context, rate limiting (quotas as well) is not checked except when explicitly turned on. 
You need to add the following query param:
+```
+?check_limits=true
+```
+
+For example:
+
+```
+tyk://123/myendpoint?check_limits=true
+```
+
+### HTTP Method transformation in looping
+
+You can tell Tyk to modify the HTTP verb during looping by adding the following query param:
+```
+?method=GET
+```
+
+For example:
+
+```
+tyk://123/myendpoint?method=GET
+```
+
+### Loop Limiting
+
+In order to avoid endless recursion, there's a default loop level limit of 5 which is set at the request level (i.e. set per request).
+In case the loop level has gone beyond the allowed limit, the user will get the error `"Loop level too deep. Found more than %d loops in single request"`.
+You can set the loop level limit with a query param as shown below. Please note that you can only set it once per request. After that, you can't overwrite it with a new loop level limit.
+
+
+Tell Tyk to limit the number of loops by adding the following query param:
+```
+?loop_limit={int}
+```
+
+For example:
+
+```
+tyk://123/myendpoint?loop_limit={int}
+```
+
+
+
diff --git a/advanced-configuration/transform-traffic/soap-rest.mdx b/advanced-configuration/transform-traffic/soap-rest.mdx
new file mode 100644
index 000000000..0e292f6b8
--- /dev/null
+++ b/advanced-configuration/transform-traffic/soap-rest.mdx
@@ -0,0 +1,194 @@
+---
+title: "Transformation Use Case: SOAP To REST"
+description: "How to transform SOAP API to REST API in Tyk"
+keywords: "Traffic Transformation, SOAP, REST, SOAP to REST"
+sidebarTitle: "SOAP To REST"
+---
+
+You can transform an existing SOAP service to a JSON REST service. This can be done from the Tyk Dashboard with no coding involved and should take around 10 minutes to perform the transform.
+
+We also have a video which walks you through the SOAP to REST transform.
+
+
+
+## Prerequisites
+
+An existing SOAP service and the WSDL definition.
For this example, we will use: + +- Upstream Target - [https://www.dataaccess.com/webservicesserver/numberconversion.wso](https://www.dataaccess.com/webservicesserver/numberconversion.wso) +- The WSDL definition from - [https://www.dataaccess.com/webservicesserver/numberconversion.wso?WSDL](https://www.dataaccess.com/webservicesserver/numberconversion.wso?WSDL) +- Postman Client (or other endpoint testing tool) + +## Steps for Configuration + +1. **Import the WSDL API** + + 1. Select APIs from the System Management menu + + APIs Menu + + 2. Click Import API + + Import API + + 3. Select **From WSDL** from the Import an API Definition window + 4. In the **Upstream Target** field, enter `https://www.dataaccess.com/webservicesserver/numberconversion.wso` as listed in the Prerequisites. + 5. Paste the WSDL definition from the link in Prerequisites + 6. Click **Generate API**. You should now have an API named `NumberConversion` in your API list + + NumberService API + +2. **Add the transforms to an Endpoint** + + 1. From the API list, select Edit from the Actions menu for the `NumberConversion` API + 2. Select the **Endpoint Designer** tab. You should see 2 POST endpoints that were imported. We will apply the transforms to the `NumberToWords` endpoint. + + Endpoints + + 3. Expand the `NumberToWords` endpoint. The following plugins should have been added as part of the import process. + + - URL rewrite + - Track endpoint + + + + + To make the URL a little friendlier, we're going to amend the Relative Path to just `/NumberToWords`. Update your API after doing this. + + + + 4. Add the following plugins from the **Plugins** drop-down list: + + - Body transform + - Modify headers + +3. **Modify the Body Transform Plugin** + + **Set up the Request** + + We use the `{{.FieldName}}` Golang template syntax to access the JSON request. For this template we will use `{{.numberToConvert}}`. + + 1. Expand the Body transform plugin. 
From the Request tab, copy the following into the Template section: + + ```xml + + + + + {{.numberToConvert}} + + + + ``` + + 2. In the Input field, enter the following: + + ```json + { + "numberToConvert": 35 + } + ``` + + + + The '35' integer can be any number you want to convert + + + + 3. Click **Test**. You should get the following in the Output field: + + ```xml + + + + + 35 + + + + ``` + + **Set up the Response** + + Again, for the response, we will be using the `{{.FieldName}}` syntax as the following `{{.Envelope.Body.NumberToDollarsResponse.NumberToDollarsResult}}` + + 1. For the Input Type, select XML + + Response Input Type + + 2. In the Template section enter: + + ```yaml + { + "convertedNumber": "{{.Envelope.Body.NumberToDollarsResponse.NumberToDollarsResult}}" + } + ``` + 3. Enter the following into the input field: + + ```xml + + + + thirty five dollars + + + + ``` + 4. Click Test. You should get the following in the Output field: + + ```json + { + "convertedNumber": "thirty five dollars" + } + ``` + +5. **Change the Content-Type Header** + + We now need to change the `content-type` header to allow the SOAP service to receive the payload in XML. We do this by using the **Modify header** plugin + + 1. Expand the Modify Header plugin + 2. From the **Request** tab enter the following in the **Add this header** section + + - Header Name: `content-type` + - Header Value: `text/xml` + + 3. Click Add + + Modify Header Request + + 4. From the **Response** tab enter the following in the **Add this header** section + + - Header Name: `content-type` + - Header Value: `application/json` + + Modify Header Response + + 5. Click **Add** + 6. Click **Update** + + Update API + +## Testing the Endpoint + +You now need to test the endpoint. We are going to use Postman. + + + +We have not set up any Authentication for this API, it has defaulted to `Open (Keyless)`. + + + + +1. 
Copy the URL for your NumberConversion API with the NumberToWords endpoint - `https://tyk-url/numberconversion/NumberToWords/` +2. Paste it as a POST URL in the Postman URL Request field +3. Enter the following as a raw Body request + +```json +{ + "numberToConvert": 35 +} +``` +Your Postman request should look similar to below (apart from the URL used) + +Postman + diff --git a/advanced-configuration/websockets.mdx b/advanced-configuration/websockets.mdx new file mode 100644 index 000000000..66ffc9f82 --- /dev/null +++ b/advanced-configuration/websockets.mdx @@ -0,0 +1,58 @@ +--- +title: "Websockets" +description: "How to use websockets in Tyk" +keywords: "websockets, Other Protocol" +sidebarTitle: "Websockets" +--- + +As from Tyk gateway v2.2, Tyk supports transparent WebSocket connection upgrades. To enable this feature, set the `enable_websockets` value to `true` in your `tyk.conf` file. + +WebSocket proxying is transparent, Tyk will not modify the frames that are sent between client and host, and rate limits are on a per-connection, not per-frame basis. + +The WebSocket upgrade is the last middleware to fire in a Tyk request cycle, and so can make use of HA capabilities such as circuit breakers and enforced timeouts. + +Tyk needs to decrypt the inbound and re-encrypt the outbound for the copy operations to work, Tyk does not just pass through the WebSocket. When the target is on default SSL port you must explicitly specify the target url for the API: + +```{.copyWrapper} +https://target:443/ +``` + +## WebSocket Example + +We are going to set up Tyk with a WebSocket proxy using our [Tyk Pro Docker Demo](https://github.com/TykTechnologies/tyk-pro-docker-demo) installation. + +We will be using the [Postman WebSocket Echo Service](https://blog.postman.com/introducing-postman-websocket-echo-service/) to test the connection. + +**Steps for Configuration** + +1. **Setup the API in Tyk** + + Create a new API in Tyk. 
For this demo we are going to select Open (Keyless) as the **Authentication mode**. + + Set the **Target URL** to `wss://ws.postman-echo.com/raw` + +2. **Test the Connection** + + 1. From Postman, select **File > New > WebSocket Request** (or from **Workspace > New > WebSocket Request** if using the web based version). + + Postman WebSocket Request + + 2. Enter your Tyk API URL in the **Enter server URL** field (minus the protocol). + 3. Enter some text in the **New Message** field and click **Send**. + 4. You will see a successful connection. + + Postman WebSocket Connection Result + + + + + If your API uses an Authentication mode other than Open (Keyless), add the details in the Header tab. + + + +An example Header configuration for using an Authentication Token with an API: + +Postman WebSocket Connection Result with Authorization token + +See the [Access an API](/api-management/gateway-config-managing-classic#access-an-api) tutorial for details on adding an Authentication Token to your APIs. + diff --git a/ai-management/ai-studio/ai-portal.mdx b/ai-management/ai-studio/ai-portal.mdx new file mode 100644 index 000000000..71cdae376 --- /dev/null +++ b/ai-management/ai-studio/ai-portal.mdx @@ -0,0 +1,125 @@ +--- +title: "AI Portal" +description: "How AI Portal works?" +keywords: "AI Studio, AI Management, AI Portal" +sidebarTitle: "AI Portal" +--- + +The Tyk AI Studio's AI Portal provides a user-friendly web interface where end users can interact with configured AI capabilities. It serves as the primary access point for users to engage with Chat Experiences, view documentation, and manage their account settings. + +## Purpose + +The main goals of the AI Portal are: + +* **Unified User Experience:** Offer a cohesive interface for accessing all AI capabilities configured within Tyk AI Studio. +* **Self-Service Access:** Enable users to independently access and utilize AI features without administrator intervention. 
+* **Contextual Documentation:** Provide integrated documentation and guidance for available AI services. +* **Account Management:** Allow users to manage their own profile settings and view usage information. +* **Secure Access Control:** Enforce permissions based on teams and organizational policies. + +## Key Features + +* **Chat Interface:** Access to all [Chat Experiences](/ai-management/ai-studio/chat-interface) the user has permission to use, with a clean, intuitive UI for conversational interactions. +* **Resource Catalogues:** Browse and subscribe to available LLMs, Data Sources, and [Tools](/ai-management/ai-studio/tools) through dedicated catalogue interfaces. +* **Application Management:** Create and manage [Apps](/ai-management/ai-studio/apps) that integrate LLMs, tools, and data sources for API access. +* **Documentation Hub:** Integrated documentation for available AI services, tools, and data sources. +* **User Profile Management:** Self-service capabilities for updating profile information and preferences. +* **History & Favorites:** Access to past chat sessions and ability to bookmark favorite conversations. +* **Responsive Design:** Optimized for both desktop and mobile devices for consistent access across platforms. +* **Customizable Themes:** Support for light/dark mode and potentially organization-specific branding. +* **Notifications:** System alerts and updates relevant to the user's AI interactions. + +## Using the AI Portal + +Users access the AI Portal through a web browser at the configured URL for their Tyk AI Studio installation. + +1. **Authentication:** Users log in using their credentials (username/password, SSO, or other configured authentication methods). +2. **Home Dashboard:** Upon login, users see a dashboard with available Chat Experiences and recent activity. +3. **Resource Discovery:** Users can browse catalogues of available LLMs, Data Sources, and Tools to which they have access. +4. 
**Application Creation:** Users can create Apps by selecting and subscribing to the LLMs, tools, and data sources they need. +5. **Chat Selection:** Users can select from available Chat Experiences to start or continue conversations. +6. **Documentation Access:** Users can browse integrated documentation to learn about available capabilities. +7. **Profile Management:** Users can update their profile settings, preferences, and view usage statistics. + + AI Portal Dashboard + +## Configuration (Admin) + +Administrators configure the AI Portal through the Tyk AI Studio admin interface: + +* **Portal Branding:** Customize logos, colors, and themes to match organizational branding. +* **Available Features:** Enable or disable specific portal features (chat, documentation, etc.). +* **Authentication Methods:** Configure login options (local accounts, SSO integration, etc.). +* **Default Settings:** Set system-wide defaults for user experiences. +* **Access Control:** Manage which teams can access the portal and specific features within it. +* **Custom Content:** Add organization-specific documentation, welcome messages, or announcements. + + Portal Configuration + +## API Access + +While the AI Portal primarily provides a web-based user interface, it is built on top of the same APIs that power the rest of Tyk AI Studio. Developers can access these APIs directly for custom integrations: + +* **Authentication API:** `/api/v1/auth/...` endpoints for managing user sessions. +* **Chat API:** `/api/v1/chat/...` endpoints for programmatic access to chat functionality. +* **User Profile API:** `/api/v1/users/...` endpoints for managing user information. +* **Datasource API:** `/datasource/{dsSlug}` endpoint for directly querying configured data sources. +* **Tools API:** `/api/v1/tools/...` endpoints for discovering and invoking available tools. +* **Applications API:** `/api/v1/apps/...` endpoints for managing user applications and their resource subscriptions. 
+ +### Datasource API + +The Datasource API allows direct semantic search against configured vector stores: + +* **Endpoint:** `/datasource/{dsSlug}` (where `{dsSlug}` is the datasource identifier) +* **Method:** POST +* **Authentication:** Bearer token required +* **Request Format:** + ```json + { + "query": "your search query here", + "n": 5 // optional, number of results to return (default: 3) + } + ``` +* **Response Format:** + ```json + { + "documents": [ + { + "content": "text content of the document chunk", + "metadata": { + "source": "filename.pdf", + "page": 42 + } + }, + // additional results... + ] + } + ``` + +**Important Note:** The endpoint does not accept a trailing slash. Use `/datasource/{dsSlug}` and not `/datasource/{dsSlug}/`. + +### Tools API + +The Tools API provides programmatic access to available tools and their capabilities: + +* **Tool Discovery:** `/api/v1/tools/` - List available tools and their specifications +* **Tool Invocation:** `/api/v1/tools/{toolId}/invoke` - Execute specific tool operations +* **Tool Documentation:** `/api/v1/tools/{toolId}/docs` - Retrieve tool usage documentation + +### MCP (Model Context Protocol) Access + +Tools can also be accessed through the **Model Context Protocol (MCP)**, providing standardized tool integration: + +* **MCP Server Endpoint:** `/mcp` - Connect MCP-compatible clients to access tools +* **Protocol Compliance:** Supports the full MCP specification for tool discovery and execution +* **Client Libraries:** Compatible with popular MCP client implementations across different programming languages + +This multi-protocol approach enables developers to: +* Use familiar MCP tooling and libraries +* Integrate with existing MCP-enabled workflows +* Maintain consistency across different AI platforms and tools + +This API-first approach ensures that all functionality available through the AI Portal can also be accessed programmatically for custom applications or integrations. 
+ +The AI Portal serves as the primary touchpoint for end users interacting with AI capabilities managed by Tyk AI Studio, providing a secure, intuitive, and feature-rich experience. diff --git a/ai-management/ai-studio/ai-studio-swagger.mdx b/ai-management/ai-studio/ai-studio-swagger.mdx new file mode 100644 index 000000000..4a34fc1f5 --- /dev/null +++ b/ai-management/ai-studio/ai-studio-swagger.mdx @@ -0,0 +1,8 @@ +--- +title: "Tyk AI Studio API" +description: "Tyk AI Studio API" +keywords: "OpenAPI Spec for AI Studio, Tyk AI Studio OAS, Tyk AI Portal REST" +sidebarTitle: "Overview" +--- + +This is the API for the AI Studio user and group management system. \ No newline at end of file diff --git a/ai-management/ai-studio/analytics.mdx b/ai-management/ai-studio/analytics.mdx new file mode 100644 index 000000000..f796d3e63 --- /dev/null +++ b/ai-management/ai-studio/analytics.mdx @@ -0,0 +1,76 @@ +--- +title: "Analytics & Monitoring" +description: "How to configure analytics in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Analytics, Monitoring" +sidebarTitle: "Analytics & Monitoring" +--- + +Tyk AI Studio incorporates an Analytics System designed to collect, aggregate, and provide insights into the usage, cost, and performance of the platform's core components, particularly LLMs, Tools, and Chat interactions. + +## Purpose + +The Analytics System serves several key purposes: + +* **Cost Tracking:** Monitor spending associated with different LLM providers and models. +* **Usage Monitoring:** Understand how users and applications are interacting with LLMs and Tools. +* **Performance Analysis:** Track metrics like latency and token counts for LLM requests. +* **Auditing & Debugging:** Provide detailed logs of interactions for troubleshooting and security analysis. +* **Reporting:** Offer data points for dashboards and reports for administrators and potentially end-users. 
+ +## Data Collection + +The primary point of data collection is the **[Proxy & API Gateway](/ai-management/ai-studio/proxy)**. As requests flow through the proxy: + +1. **Request Details:** Information about the incoming request is captured (e.g., user ID, application ID, requested LLM/route, timestamp). +2. **LLM Interaction:** Details about the interaction with the backend LLM are recorded (e.g., model used, prompt tokens, completion tokens, latency). +3. **Cost Calculation:** Using data from the [Model Pricing System](/ai-management/ai-studio/llm-management), the cost of the interaction is calculated based on token counts. +4. **Tool Usage:** If the interaction involved [Tools](/ai-management/ai-studio/tools), relevant details might be logged (e.g., which tool was called, success/failure). +5. **Chat Context:** For interactions originating from the [Chat Interface](/ai-management/ai-studio/chat-interface), metadata about the chat session might be included. + +## Architecture + +* **Asynchronous Ingestion:** To minimize impact on request latency, analytics data is typically collected by the Proxy and sent asynchronously to a dedicated analytics database or processing pipeline. +* **Data Storage:** A suitable database (e.g., time-series database like InfluxDB, relational database like PostgreSQL, or a data warehouse) stores the aggregated analytics records. +* **API Endpoints:** Tyk AI Studio exposes internal API endpoints that allow the Admin UI (and potentially other authorized services) to query the aggregated analytics data. 
+ +## Key Metrics Tracked (Examples) + +* **Per LLM Request:** + * Timestamp + * User ID / API Key ID + * LLM Configuration ID / Route ID + * Model Name + * Prompt Tokens + * Completion Tokens + * Total Tokens + * Calculated Cost + * Latency (ms) + * Success/Error Status +* **Per Tool Call (if applicable):** + * Timestamp + * Tool ID + * Success/Error Status + * Latency (ms) +* **Aggregated Metrics:** + * Total cost per user/application/LLM over time. + * Total requests per user/application/LLM over time. + * Average latency per LLM. + * Most frequently used models/tools. + +## Monitoring & Dashboards (Admin) + +Administrators typically access analytics data via dashboards within the Tyk AI Studio UI. + +* **Overview:** High-level summaries of cost, usage, and requests. +* **Filtering & Grouping:** Ability to filter data by time range, user, application, LLM configuration, etc. +* **Visualizations:** Charts and graphs showing trends in cost, token usage, request volume, and latency. +* **Detailed Logs:** Access to raw or near-raw event logs for specific interactions (useful for debugging). + + Analytics Dashboard + +## Integration with Other Systems + +* **[Budget Control](/ai-management/ai-studio/llm-management):** Analytics data (specifically cost) is likely used by the Budget Control system to track spending against defined limits. +* **[Model Pricing](/ai-management/ai-studio/llm-management):** The pricing definitions are crucial for calculating the cost metric within the analytics system. + +By providing detailed analytics, Tyk AI Studio enables organizations to effectively manage costs, understand usage patterns, and ensure the optimal performance of their AI interactions. 
diff --git a/ai-management/ai-studio/apps.mdx b/ai-management/ai-studio/apps.mdx new file mode 100644 index 000000000..3fdc47cee --- /dev/null +++ b/ai-management/ai-studio/apps.mdx @@ -0,0 +1,298 @@ +--- +title: "Apps View for Tyk AI Studio" +description: "How to configure apps in AI Studio?" +keywords: "AI Studio, AI Management, Apps" +--- + +The **Apps View** is used to manage user-created applications that interact with Large Language Models (LLMs) and data sources via the Tyk AI Gateway. These apps provide a mechanism for users to define and encapsulate the functionality and resources they need for API interactions. Below is a detailed overview of the Apps View and its functionality. + +--- + +#### **Apps List Overview** + +1. **Name**: + - The name of the application created by the user (e.g., `My New App`, `Jira Task Refiner`). + +2. **Description**: + - A brief explanation of the app's purpose or functionality (e.g., "Experiment to refine tasks for Jira"). + +3. **User**: + - The name of the user who created or owns the application. + +4. **Actions**: + - A menu (three-dot icon) for performing app-related actions, such as: + - Viewing app details. + - Editing the app configuration. + - Deleting the app. + +--- + +#### **Features** + +1. **Add App Button**: + - A green button labeled **+ ADD APP**, located in the top-right corner. Clicking this button opens a form for creating a new app. + +2. **Pagination Control**: + - Located at the bottom-left, this dropdown allows administrators to adjust the number of apps displayed per page. + +--- + +#### **Purpose of Apps** + +1. **Encapsulation of Resources**: + - Apps bundle together the LLMs, tools, and data sources a user needs to access through the AI Gateway. + - Users can subscribe to tools from the [AI Portal](/ai-management/ai-studio/ai-portal) tool catalogue, similar to how they access LLMs and data sources. + +2. 
**RESTful Access via Credentials**: + - Each app is linked to a set of credentials that users can use to authenticate API requests. + - Credentials must be activated by an administrator after an app is submitted. + +3. **Integration with Upstream LLMs and Tools**: + - Apps allow users to access LLMs through the following methods: + - **Vendor-Native SDKs**: Direct interaction with the upstream provider's SDK. + - **OpenAI-Compatible API**: Requests translated into a standardized API format. + - Tools can be accessed through multiple protocols: + - **REST APIs**: Direct HTTP API access to tool endpoints. + - **MCP (Model Context Protocol)**: Standardized tool interaction protocol for enhanced compatibility. + +4. **Centralized Administration**: + - Administrators can manage all user-created apps, ensuring proper governance and access control. + +--- + +#### **Example Workflow** + +1. **App Creation**: + - A user creates an app and specifies the LLMs, tools, and data sources required for their use case. + - Tools can be discovered and subscribed to through the AI Portal's tool catalogue interface. + +2. **Credential Activation**: + - Once the app is submitted, an admin reviews and activates the app's credentials, enabling the app to interact with the AI Gateway. + +3. **API Integration**: + - The user integrates their app with their systems using the provided credentials and API endpoints. + - Multiple access methods are available: REST APIs, OpenAI-compatible APIs, and MCP protocol. + +4. **Real-Time Usage**: + - The app facilitates communication with the specified resources via the AI Gateway using the chosen protocol. + +--- + +#### **Benefits** + +1. **Streamlined Access**: + - Users can consolidate all the resources they need into a single app, simplifying integration workflows. + +2. **Governance and Security**: + - Admin-controlled credential activation ensures that only approved apps can access the AI Gateway. + +3. 
**Flexibility**: + - Support for both vendor-native SDKs and OpenAI-compatible APIs allows users to integrate apps into diverse environments. + +4. **Centralized Management**: + - Admins can oversee all user-created apps, ensuring compliance with organizational policies. + +--- + +The **Apps View** is a key feature of the Tyk AI Studio, enabling users to define, configure, and securely interact with LLMs and data sources through the AI Gateway while ensuring robust governance and control. + +### App Details and Proxy Logs + +The **App Details View** provides a comprehensive overview of a specific app, including its performance metrics, usage data, and traffic logs. This view is designed to help administrators monitor the app's activity and ensure compliance with governance policies. + +--- + +#### **Sections and Features** + +### **App Token Usage and Cost** +- **Graph**: + - Displays token usage and cost over a specified date range. + - Tracks the app's performance by visualizing the volume of tokens consumed and associated costs. + +- **Date Range Selector**: + - Allows filtering of token usage and cost data for specific start and end dates. + +--- + +### **App Information** +1. **Name**: + - The name of the app (e.g., `Jira Task Refiner`). + +2. **Description**: + - A summary of the app's purpose (e.g., "Experiment to refine tasks for Jira"). + +3. **User**: + - The creator or owner of the app (e.g., `Jeffy Mathew`). + +4. **LLMs**: + - The LLMs used by the app (if specified). + +5. **Datasources**: + - Lists the data sources configured for use with this app (e.g., `Tyk Documentation`). + +6. **Tools**: + - Shows the tools subscribed to and available for this app (e.g., `JIRA API`, `Weather Service`). + +--- + +### **Credential Information** +1. **Key ID**: + - The unique identifier for the app's credentials. + +2. **Secret**: + - The app's secret key, masked for security purposes. + +3. **Active Status**: + - Indicates whether the app's credentials are active. 
+ - Admins must activate credentials after the app is submitted. + +--- + +### **Proxy Logs** +- **Purpose**: + - Provides a detailed log of inbound and outbound requests processed by the app for governance and troubleshooting purposes. + +- **Columns**: + - **Timestamp**: The date and time of the request. + - **Vendor**: The upstream LLM vendor handling the request. + - **Response Code**: The HTTP status code of the response. + - **Request**: Details of the inbound request. + - **Response**: The response returned by the upstream LLM or tool. + +- **Pagination**: + - Allows administrators to navigate through logs in batches. + +--- + +### **Action Buttons** +1. **Edit App**: + - Opens the app configuration form for editing details such as name, description, LLMs, and data sources. + +2. **Back to Apps**: + - Navigates back to the **Apps List View**. + +--- + +#### **Purpose of the App Details View** + +1. **Monitoring Performance**: + - The token usage and cost graph provides insight into how efficiently the app is utilizing resources. + +2. **Governance and Compliance**: + - Proxy logs and credential management ensure transparency and compliance with organizational policies. + +3. **Troubleshooting**: + - Administrators can use detailed request and response logs to identify and resolve issues. + +4. **Security**: + - Credential status and masking ensure that sensitive information is handled securely. + +--- + +This **App Details View** is an essential feature for administrators to monitor, manage, and ensure the secure operation of user-created apps within the Tyk AI Studio. + +### App Editing and Credential Approval + +The **App Edit View** allows administrators to modify app details and manage credentials for user-created applications. Most importantly, this view provides controls to **approve or reject** an app by activating or deactivating its credentials. + +--- + +#### **Sections and Features** + +### **App Details** +1. 
**Name** *(Required)*: + - Editable field for the app's name (e.g., `Jira Task Refiner`). + +2. **Description** *(Optional)*: + - A short summary of the app's functionality or purpose (e.g., "Experiment to refine tasks for Jira"). + +3. **User** *(Read-Only)*: + - Displays the user who created the app (e.g., `Jeffy Mathew`). + +4. **LLMs** *(Dropdown)*: + - Select or modify the Large Language Models associated with this app. + - Example: OpenAI's GPT-4, Anthropic's Claude. + +5. **Datasources** *(Dropdown)*: + - Add or remove data sources the app can access. + - Example: `Tyk Documentation`. + +6. **Tools** *(Dropdown)*: + - Subscribe to or remove tools the app can use. + - Tools are available from the AI Portal tool catalogue. + - Example: `JIRA API`, `Weather Service`. + +--- + +### **Credential Information** +1. **Key ID** *(Read-Only)*: + - The unique identifier for the app's credentials. + +2. **Secret** *(Masked)*: + - A secure key that is masked by default but used for API integration. + +3. **Active Toggle**: + - **Purpose**: Approve or reject the app by activating or deactivating its credentials. + - **States**: + - **Inactive**: The app is not approved, and credentials cannot be used. + - **Active**: The app is approved, enabling API access via the AI Gateway. + +--- + +### **Action Buttons** +1. **Update App**: + - Saves changes made to the app's details and credentials. + +2. **Back to Apps**: + - Navigates back to the **Apps List View** without saving changes. + +--- + +#### **Use Cases** + +1. **Approval Workflow**: + - After a user submits an app, administrators can review its configuration and activate the credentials if it complies with governance policies. + +2. **Editing App Details**: + - Admins can modify the app's name, description, associated LLMs, and data sources to refine its configuration. + +3. **Credential Management**: + - Credentials can be deactivated if the app no longer complies with organizational requirements. + +4. 
**Governance and Security**: + - Ensures apps have controlled access to LLMs and data sources, adhering to organizational policies. + +--- + +#### **Purpose and Benefits** + +1. **Enhanced Control**: + - Enables administrators to manage the lifecycle of user-created apps, ensuring proper governance. + +2. **Simplified Approval Process**: + - The credential activation toggle streamlines the process of approving or rejecting apps. + +3. **Secure Integration**: + - Ensures only approved apps can interact with the AI Gateway, protecting sensitive data and resources. + +--- + +The **App Edit View** is a critical feature for managing user-created apps in the Tyk AI Studio, providing administrators with full control over app configuration, approval, and access management. + +### Privacy Level Validation + +When creating or updating an app, the system validates that the privacy levels of selected datasources are not higher than the privacy levels of selected LLMs. This ensures data governance and prevents sensitive data from being exposed to less secure LLMs. + +Privacy levels define how data is protected by controlling LLM access based on its sensitivity. LLM providers with lower privacy levels can't access higher-level data sources and tools, ensuring secure and appropriate data handling. + +The system works with 4 privacy levels from low to high: +- Public – Safe to share (e.g., blogs, press releases). +- Internal – Company-only info (e.g., reports, policies). +- Confidential – Sensitive business data (e.g., financials, strategies). +- Restricted (PII) – Personal data (e.g., names, emails, customer info). + +If you attempt to create or update an app with datasources that have higher privacy requirements (levels) than the selected LLMs, you'll receive an error message: "Datasources have higher privacy requirements than the selected LLMs. Please select LLMs with equal or higher privacy levels." + +To resolve this issue, either: +1. 
Select LLMs with higher privacy levels that match or exceed your datasource requirements +2. Use datasources with lower privacy requirements that are compatible with your selected LLMs diff --git a/ai-management/ai-studio/budgeting.mdx b/ai-management/ai-studio/budgeting.mdx new file mode 100644 index 000000000..9fa98f14c --- /dev/null +++ b/ai-management/ai-studio/budgeting.mdx @@ -0,0 +1,60 @@ +--- +title: "Budget Control" +description: "How to configure budgets in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Budget Control" +sidebarTitle: "Budget Control" +--- + +Tyk AI Studio provides a Budget Control system to help organizations manage and limit spending on Large Language Model (LLM) usage. + +## Purpose + +The primary goals of the Budget Control system are: + +* **Prevent Overspending:** Set hard limits on costs associated with LLM API calls. +* **Cost Allocation:** Track and enforce spending limits at different granularities (e.g., per organization, per specific LLM configuration). +* **Predictability:** Provide better predictability for monthly AI operational costs. + +## Scope & Configuration + +Budgets are typically configured by administrators and applied at specific levels: + +* **Organization Level:** A global budget limit for all LLM usage within the organization. +* **LLM Configuration Level:** A specific budget limit tied to a particular LLM setup (e.g., a dedicated budget for a high-cost `gpt-4` configuration). +* **(Potentially) Application/User Level:** Granular budgets might be assignable to specific applications or teams (depending on implementation specifics). + +**Configuration Parameters:** + +* **Limit Amount:** The maximum monetary value allowed (e.g., $500). +* **Currency:** The currency the budget is defined in (e.g., USD). +* **Time Period:** The reset interval for the budget, typically monthly (e.g., resets on the 1st of each month). 
+* **Scope:** Which entity the budget applies to (Organization, specific LLM Configuration ID, etc.). + +Administrators configure these budgets via the Tyk AI Studio UI or API. + + Budget Config UI + +## Enforcement + +Budget enforcement primarily occurs at the **[Proxy & API Gateway](/ai-management/ai-studio/proxy)**: + +1. **Request Received:** The Proxy receives a request destined for an LLM. +2. **Cost Estimation:** Before forwarding the request, the Proxy might estimate the potential maximum cost (or rely on post-request cost calculation). +3. **Budget Check:** The Proxy checks the current spending against all applicable budgets (e.g., the specific LLM config budget AND the overall organization budget) for the current time period. +4. **Allow or Deny:** + * If the current spending plus the estimated/actual cost of the request does *not* exceed the limit(s), the request is allowed to proceed. + * If the request *would* cause a budget limit to be exceeded, the request is blocked, and an error is returned to the caller. + +## Integration with Other Systems + +* **[Analytics & Monitoring](/ai-management/ai-studio/analytics):** The Analytics system provides the cost data used to track spending against budgets. The current spent amount for a budget period is derived from aggregated analytics data. +* **[Model Pricing](/ai-management/ai-studio/llm-management):** The pricing definitions are essential for the Analytics system to calculate costs accurately, which in turn feeds the Budget Control system. +* **[Notification System](/ai-management/ai-studio/notifications):** Budgets can be configured to trigger notifications when spending approaches or reaches defined thresholds (e.g., alert admin when 80% of budget is consumed, notify user/admin when budget is exceeded). + +## Benefits + +* **Financial Control:** Prevents unexpected high bills from LLM usage. +* **Resource Management:** Ensures fair distribution of AI resources according to allocated budgets. 
+* **Accountability:** Tracks spending against specific configurations or organizational units. + +Budget Control is a critical feature for organizations looking to adopt AI technologies responsibly and manage their operational costs effectively. diff --git a/ai-management/ai-studio/call-settings.mdx b/ai-management/ai-studio/call-settings.mdx new file mode 100644 index 000000000..04368535e --- /dev/null +++ b/ai-management/ai-studio/call-settings.mdx @@ -0,0 +1,112 @@ +--- +title: "LLM Call Settings" +description: "How to configure LLMs in AI Studio?" +keywords: "AI Studio, AI Management, LLMs, Large Language Models, LLM Call Settings" +--- + +The **LLM Call Settings** section allows administrators to configure default runtime parameters for Large Language Models (LLMs) used in chat interactions and middleware system function calls. These settings provide control over how the LLM processes inputs and generates outputs. It is important to note that these settings are not utilized in the AI Gateway proxy since applications are expected to define their own model configurations. + +--- + +#### **Table Overview** +The table lists all configured call settings for available LLMs with the following columns: + +1. **Model Name**: + The specific name or version of the LLM for which the call settings apply (e.g., `claude-3.5-sonnet-20240620`, `gpt-4o`). + +2. **Temperature**: + A numerical value (e.g., `0.7`, `0.1`) that controls the randomness of the LLM's responses: + - Higher values (e.g., `0.7`) produce more creative and varied outputs. + - Lower values (e.g., `0.1`) generate more deterministic and focused responses. + +3. **Max Tokens**: + The maximum number of tokens the LLM can generate in a single response. This sets a hard limit on the length of the output to ensure efficiency and prevent excessive usage. + +4. **Actions**: + A menu (three-dot icon) with quick actions to edit or delete the call settings for a specific model. + +--- + +#### **Features** +1. 
**Add LLM Call Setting Button**: + A green button labeled **+ ADD LLM CALL SETTING**, located at the top-right of the view. Clicking this button opens a form to define call settings for a new model. + +2. **Pagination Dropdown**: + Found at the bottom-left corner, this dropdown allows users to control how many call settings are displayed per page (e.g., 10, 20, etc.). + +--- + +#### **Use Cases** +- **Chats**: + These settings control how the LLM responds in conversational interfaces within the Chat Room feature, allowing administrators to fine-tune the user experience. + +- **Middleware Function Calls**: + The settings guide LLM behavior in automated backend processes where the LLM is used for tasks such as data generation or content analysis. + +--- + +#### **Quick Insights** +The **LLM Call Settings** section provides administrators with granular control over LLM behavior during runtime. While the settings are not used in the proxy, they are crucial for managing system-level and chat-specific interactions, ensuring consistent performance and efficiency. This section enables streamlined configuration for application-level integration of LLMs. + +### Edit/Create Call Settings + +The **Edit/Create LLM Call Settings View** enables administrators to configure or update call-time options for a specific Large Language Model (LLM). These settings determine how the LLM processes inputs and generates outputs in chat interactions or middleware system function calls. Below is an explanation of each field and its purpose: + +--- + +#### **Form Fields and Descriptions** + +1. **Model Preset** *(Dropdown)*: + - Allows administrators to select a pre-configured preset for the LLM, or choose "Custom" to manually configure all settings. + +2. **Model Name** *(Required)*: + - Specifies the name of the LLM model these settings apply to (e.g., `claude-3.5-sonnet-20240620`). + +3. 
**Temperature** *(Decimal, 0.0 to 1.0)*: + - Controls the randomness of the model's responses: + - **Higher values** (e.g., `0.7`): More creative and varied outputs. + - **Lower values** (e.g., `0.1`): More deterministic and repetitive outputs. + +4. **Max Tokens** *(Integer)*: + - Defines the maximum number of tokens the LLM can generate in its response. + - Helps to limit response length for efficiency and control. + +5. **Top P** *(Decimal, 0.0 to 1.0)*: + - Controls nucleus sampling, a method to limit token selection to the most probable subset: + - **Higher values** (e.g., `0.9`): Includes more variability in token choices. + - **Lower values** (e.g., `0.1`): Focuses on the most likely tokens. + +6. **Top K** *(Integer)*: + - Limits token selection to the top `K` most probable tokens at each step: + - **Higher values** allow for more varied responses. + - **Lower values** restrict outputs to fewer options. + +7. **Min Length** *(Integer)*: + - Sets the minimum number of tokens that must be included in the model's response. + +8. **Max Length** *(Integer)*: + - Specifies the upper limit for the length of the response (e.g., 200,000 tokens). + +9. **Repetition Penalty** *(Decimal)*: + - Penalizes repeated tokens to prevent the model from generating repetitive responses. + - **Higher values** (e.g., `1.5`): Stronger penalty for repetition. + - **Lower values** (e.g., `1.0`): Little or no penalty. + +10. **System Prompt** *(Optional)*: + - A predefined instruction for the LLM that sets the tone or context for its responses. + Example: + *"You are a helpful AI assistant specializing in API management and related topics. Respond in markdown and cite sources where appropriate."* + +--- + +#### **Action Buttons** +1. **Update LLM Call Settings / Create LLM Call Settings**: + - Saves the configuration or creates a new call settings entry. This button becomes active only when all required fields are valid. + +2. 
**Back to LLM Call Settings**: + - A link in the top-right corner to return to the main LLM Call Settings view without saving changes. + +--- + +#### **Purpose** +This interface provides granular control over runtime parameters, allowing administrators to customize the LLM's behavior for different use cases. These settings are critical for ensuring that the model generates responses tailored to specific operational or user requirements. diff --git a/ai-management/ai-studio/catalogs.mdx b/ai-management/ai-studio/catalogs.mdx new file mode 100644 index 000000000..14ff8f308 --- /dev/null +++ b/ai-management/ai-studio/catalogs.mdx @@ -0,0 +1,76 @@ +--- +title: "Catalogs View for Tyk AI Studio" +description: "How to manage catalogs in AI Studio?" +keywords: "AI Studio, AI Management, Catalogs" +--- + +The **Catalogs View** provides administrators with a centralized interface to group and manage related resources such as Large Language Models (LLMs), Data Sources, and Tools. By organizing these resources into catalogs, administrators can efficiently manage access control and simplify the assignment of resources to user groups. + +--- + +#### **Catalogs Overview** + +1. **Purpose**: + - Catalogs group similar resources (LLMs, Data Sources, or Tools) for streamlined management. + - Access to these resources is controlled by assigning catalogs to specific teams. + +2. **Use Cases**: + - Simplify resource management by categorizing related LLMs, Data Sources, or Tools into a single catalog. + - Apply consistent access control policies by assigning catalogs to teams. + - Manage large quantities of resources efficiently in a growing environment. + +--- + +#### **Catalogs View Layout** + +1. **Columns**: + - **Name**: + - The name of the catalog (e.g., `Main Catalogue`). + - **Resources**: + - Lists the resources included in the catalog: + - For LLMs: Displays the LLMs included (e.g., `Anthropic`, `OpenAI GPT-4`). 
+ - For Data Sources: Shows vector or relational data sources. + - For Tools: Lists available tools (e.g., APIs, web scrapers). + - **Actions**: + - A dropdown menu to perform actions such as editing or deleting the catalog. + +2. **Add Catalog Button**: + - Located at the top-right of the view, this green button (**+ ADD CATALOG**) opens a form to create a new catalog. + +3. **Pagination Control**: + - Adjust the number of catalogs displayed per page using the dropdown at the bottom-left. + +--- + +#### **Cross-Resource Applicability** + +This view applies to the following catalog types: + +1. **LLM Catalogs**: + - Group and manage collections of LLMs, enabling administrators to easily control access to models based on their functionality or vendor. + +2. **Data Source Catalogs**: + - Organize vector or structured data sources into catalogs for efficient assignment and governance. + +3. **Tool Catalogs**: + - Categorize tools (e.g., APIs, search utilities) into logical groups to streamline their use in chat rooms or applications. + +--- + +#### **Benefits** + +1. **Centralized Management**: + - Simplifies resource organization and reduces redundancy. + +2. **Efficient Governance**: + - Allows for consistent application of access control policies across groups. + +3. **Scalability**: + - Handles large numbers of resources effectively as organizational needs evolve. + +4. **Flexible Integration**: + - Ensures that related resources are grouped for seamless assignment to teams or chat rooms. + +--- + +This generalized view supports catalog management for all resource types, providing a uniform approach to simplify administration while maintaining security and organizational efficiency. 
diff --git a/ai-management/ai-studio/chat-interface.mdx b/ai-management/ai-studio/chat-interface.mdx new file mode 100644 index 000000000..7116f398d --- /dev/null +++ b/ai-management/ai-studio/chat-interface.mdx @@ -0,0 +1,61 @@ +--- +title: "Chat Interface" +description: "How AI Studios Chat Interface works?" +keywords: "AI Studio, AI Management, Chat Interface" +sidebarTitle: "Chat Interface" +--- + +# Chat Interface + +Tyk AI Studio's Chat Interface provides a secure and interactive environment for users to engage with Large Language Models (LLMs), leveraging integrated tools and data sources. It serves as the primary front-end for conversational AI interactions within the platform. + +## Purpose + +The main goals of the Chat Interface are: + +* **User-Friendly Interaction:** Offer an intuitive web-based chat experience for users of all technical levels. +* **Unified Access:** Provide a single point of access to various configured LLMs, Tools, and Data Sources. +* **Context Management:** Maintain conversation history and manage context, including system prompts and retrieved data (RAG). +* **Secure & Governed:** Enforce access controls based on teams and apply configured Filters. + +## Key Features + +* **Chat Sessions:** Each conversation happens within a session, preserving history and context. +* **Streaming Responses:** LLM responses are streamed back to the user for a more interactive feel. +* **Tool Integration:** Seamlessly uses configured [Tools](/ai-management/ai-studio/tools) when the LLM determines they are necessary to fulfill a user's request. The available tools depend on the Chat Experience configuration and the user's group permissions. +* **Data Source (RAG) Integration:** Can automatically query configured [Data Sources](/ai-management/ai-studio/datasources-rag) to retrieve relevant information (Retrieval-Augmented Generation) to enhance LLM responses. 
The available data sources depend on the Chat Experience configuration and the user's group permissions. +* **System Prompts:** Administrators can define specific system prompts for different Chat Experiences to guide the LLM's persona, tone, and behavior. +* **History:** Users can view their past chat sessions. +* **File Upload (Context):** Users might be able to upload files directly within a chat to provide temporary context for the LLM (depending on configuration). +* **Access Control:** Users only see and can interact with Chat Experiences assigned to their Teams. + +## Using the Chat Interface + +Users access the Chat Interface through the Tyk AI Studio web UI. + +1. **Select Chat Experience:** Users choose from a list of available Chat Experiences (pre-configured chat environments) they have access to. +2. **Interact:** Users type their prompts or questions. +3. **Receive Responses:** The LLM processes the request, potentially using tools or data sources behind the scenes, and streams the response back. + + Chat UI + +## Configuration (Admin) + +Administrators configure the available "Chat Experiences" (formerly known as Chat Rooms) via the UI or API. Configuration involves: + +* **Naming:** Giving the Chat Experience a descriptive name. +* **Assigning LLM:** Linking to a specific [LLM Configuration](/ai-management/ai-studio/llm-management). +* **Enabling Tools:** Selecting which [Tool Catalogues](/ai-management/ai-studio/tools) are available. +* **Enabling Data Sources:** Selecting which [Data Source Catalogues](/ai-management/ai-studio/datasources-rag) are available. +* **Setting System Prompt:** Defining the guiding prompt for the LLM. +* **Applying Filters:** Associating specific [Filters](/ai-management/ai-studio/filters) for governance. +* **Assigning Groups:** Determining which Teams can access this Chat Experience. +* **Enabling/Disabling Features:** Toggling features like file uploads or direct tool usage. 
+ + Chat Config + +## API Access + +Beyond the UI, Tyk AI Studio provides APIs (`/api/v1/chat/...`) for programmatic interaction with the chat system, allowing developers to build custom applications or integrations that leverage the configured Chat Experiences. + +This comprehensive system provides a powerful yet controlled way for users to interact with AI capabilities managed by Tyk AI Studio. diff --git a/ai-management/ai-studio/configuration.mdx b/ai-management/ai-studio/configuration.mdx new file mode 100644 index 000000000..9326694e8 --- /dev/null +++ b/ai-management/ai-studio/configuration.mdx @@ -0,0 +1,83 @@ +--- +title: "First Steps" +description: "Essential first steps to configure Tyk AI Studio after installation" +keywords: "AI Studio, AI Management, Configuration" +sidebarTitle: "First Steps" +--- + +This guide covers the essential first steps to take within the Tyk AI Studio UI after successfully deploying the platform and registering your first user. + +## 1. First Login + +After completing the [installation process](/ai-management/ai-studio/quickstart) and registering your first user: + +1. **Access the UI:** Open your web browser and navigate to your configured `SITE_URL` +2. **Admin Login:** Log in using the administrator account you created during registration: + * **Email:** The email address you used during registration (should match your `ADMIN_EMAIL` environment variable) + * **Password:** The password you set during the registration process + + Login Screen + + + + + **Reminder**: If you haven't completed the initial registration yet, go back to your [installation guide](/ai-management/ai-studio/quickstart) and follow the "First User Registration" section. + + + +## 2. Configure Your First LLM + +One of the most common initial steps is connecting Tyk AI Studio to an LLM provider. + +1. **Navigate to LLM Management:** In the admin UI sidebar, find the section for LLM Management (or similar) and select it. +2. 
**Add LLM Configuration:** Click the button to add a new LLM Configuration. +3. **Select Provider:** Choose the LLM provider you want to connect (e.g., OpenAI, Anthropic, Azure OpenAI). +4. **Enter Details:** + * **Configuration Name:** Give it a recognizable name (e.g., `OpenAI-GPT-4o`). + * **Model Name(s):** Specify the exact model identifier(s) provided by the vendor (e.g., `gpt-4o`, `gpt-4-turbo`). + * **API Key (Using Secrets):** + * **IMPORTANT:** Do *not* paste your API key directly here. Instead, use [Secrets Management](/ai-management/ai-studio/secrets). + * If you haven't already, go to the **Secrets** section in the admin UI and create a new secret: + * **Variable Name:** `OPENAI_API_KEY` (or similar) + * **Secret Value:** Paste your actual OpenAI API key here. + * Save the secret. + * Return to the LLM Configuration screen. + * In the API Key field, enter the secret reference: `$SECRET/OPENAI_API_KEY` (using the exact Variable Name you created). + * **Other Parameters:** Configure any other provider-specific settings (e.g., Base URL for Azure/custom endpoints, default temperature, etc.). +5. **Save:** Save the LLM configuration. + + LLM Config UI + +This LLM is now available for use within Tyk AI Studio, subject to [User/Group permissions](/ai-management/ai-studio/user-management). + +For more details, see the [LLM Management](/ai-management/ai-studio/llm-management) documentation. + +## 3. Verify Core System Settings + +While most core settings are configured during deployment, you can usually review them within the administration UI: + +* **Site URL:** Check that the base URL for accessing the portal is correct. +* **Email Configuration:** If using features like user invites or notifications, ensure SMTP settings are correctly configured and test email delivery if possible ([Notifications](/ai-management/ai-studio/notifications)). + +## 4. 
Configuration Reference (Deployment) + +Remember that fundamental system parameters are typically set via environment variables or Helm values *during deployment*. This includes: + +* Database Connection (`DATABASE_TYPE`, `DATABASE_URL`) +* License Key (`TYK_AI_LICENSE`) +* Secrets Encryption Key (`TYK_AI_SECRET_KEY`) +* Base URL (`SITE_URL`) +* Email Server Settings (`SMTP_*`, `FROM_EMAIL`, `ADMIN_EMAIL`) +* Registration Settings (`ALLOW_REGISTRATIONS`, `FILTER_SIGNUP_DOMAINS`) + +Refer to the **Configuration Options** detailed within the [Installation Guide](/ai-management/ai-studio/deployment-k8s) for specifics on setting these values during the deployment process. + +## Next Steps + +With the initial configuration complete, you can now: + +* Explore [User Management](/ai-management/ai-studio/user-management) to create users and groups. +* Set up [Tools](/ai-management/ai-studio/tools) for external API integration. +* Configure [Data Sources](/ai-management/ai-studio/datasources-rag) for RAG. +* Define [Filters](/ai-management/ai-studio/filters) for custom request/response logic. +* Try out the [Chat Interface](/ai-management/ai-studio/chat-interface). diff --git a/ai-management/ai-studio/core-concepts.mdx b/ai-management/ai-studio/core-concepts.mdx new file mode 100644 index 000000000..d92c3c43a --- /dev/null +++ b/ai-management/ai-studio/core-concepts.mdx @@ -0,0 +1,51 @@ +--- +title: "What is AI Studio?" +description: "Introduction to Tyk AI Studio - comprehensive AI management platform" +keywords: "AI Studio, AI Management, Introduction" +sidebarTitle: "What is AI Studio?" +--- + +Tyk AI Studio is a comprehensive platform that enables organizations to manage, govern, and deploy AI applications with enterprise-grade security, control, and observability. Before diving into installation and configuration, let's understand what AI Studio offers and its core concepts. 
+ +## Key Components & Philosophy + +Tyk AI Studio is designed as a secure, observable, and extensible gateway for interacting with Large Language Models (LLMs) and other AI services. Key architectural pillars include: + +* **[AI Gateway](/ai-management/ai-studio/proxy):** The central gateway managing all interactions between your applications and various LLM providers. It enforces policies, logs activity, and handles vendor abstraction. +* **AI Portal:** Empowers developers with a curated catalog of AI tools and services for faster innovation. +* **[Chat](/ai-management/ai-studio/chat-interface):** Provides a secure and interactive environment for users to engage with LLMs, leveraging integrated tools and data sources. +* **[User Management & RBAC](/ai-management/ai-studio/user-management):** Securely manages users, groups, and permissions. Access to resources like LLMs, Tools, and Data Sources is controlled via group memberships. +* **Extensibility ([Tools](/ai-management/ai-studio/tools) & [Data Sources](/ai-management/ai-studio/datasources-rag)):** Allows integrating external APIs (Tools) and vector databases (Data Sources) into LLM workflows (e.g., for Retrieval-Augmented Generation - RAG). +* **Policy Enforcement ([Filters](/ai-management/ai-studio/filters)):** Intercept and modify LLM requests/responses using custom scripts to enforce specific rules or data transformations. +* **Configuration over Code:** Many aspects like LLM parameters, Filters, and [Budgets](/ai-management/ai-studio/budgeting) are configured through the UI/API rather than requiring code changes. +* **Security First:** Features like [Secrets Management](/ai-management/ai-studio/secrets), [SSO integration](/ai-management/ai-studio/sso), and fine-grained access control are integral to the platform. 
+* **Observability:** Includes systems for [Analytics & Monitoring](/ai-management/ai-studio/analytics) and [Notifications](/ai-management/ai-studio/notifications) to track usage, costs, and system events. + +## Core Entities + +Understanding these entities is crucial: + +* **[User](/ai-management/ai-studio/user-management#core-concepts):** Represents an individual interacting with Tyk AI Studio, managed within the User Management system. +* **[Group](/ai-management/ai-studio/user-management#core-concepts):** Collections of users, the primary mechanism for assigning access rights to resources via RBAC. +* **[API Key](/ai-management/ai-studio/user-management#core-concepts):** Credentials generated by Users to allow applications or scripts programmatic access to Tyk AI Studio APIs (like the Proxy), inheriting the User's permissions. +* **[LLM Configuration](/ai-management/ai-studio/llm-management):** Represents a specific LLM provider and model setup (e.g., OpenAI GPT-4, Anthropic Claude 3), including parameters and potentially associated [pricing](/ai-management/ai-studio/llm-management) and [budgets](/ai-management/ai-studio/budgeting). +* **[Tool](/ai-management/ai-studio/tools):** Definitions of external APIs (via OpenAPI spec) that can be invoked by LLMs during chat sessions to perform actions or retrieve external data. +* **[Data Source](/ai-management/ai-studio/datasources-rag):** Connections to vector databases or other data repositories used for Retrieval-Augmented Generation (RAG) within chat sessions. +* **[Catalogue](/ai-management/ai-studio/tools) ([Tools](/ai-management/ai-studio/tools) / [Data Sources](/ai-management/ai-studio/datasources-rag)):** Collections that group related Tools or Data Sources for easier management and assignment to Groups for access control. 
+* **[Secret](/ai-management/ai-studio/secrets):** Securely stored credentials (API keys, tokens) referenced indirectly (e.g., `$SECRET/MY_KEY`) in configurations like LLMs, Tools, or Data Sources. +* **[Filter](/ai-management/ai-studio/filters):** Custom logic (using Tengo scripts) associated with specific execution points (e.g., pre/post LLM request) to intercept and modify requests/responses. + +## Getting Started + +Now that you understand the core concepts, you're ready to begin your AI Studio journey: + +1. **[Choose your installation method](/ai-management/ai-studio/quickstart)**: Docker/Packages (recommended) or Kubernetes +2. **[Complete first-time setup](/ai-management/ai-studio/configuration)**: Register your admin user and configure your first LLM +3. **Explore the platform**: Start with the chat interface and gradually explore advanced features + + + + + **Ready to start?** Head to the [Installation Guide](/ai-management/ai-studio/quickstart) to get AI Studio up and running in minutes. + + diff --git a/ai-management/ai-studio/dashboard.mdx b/ai-management/ai-studio/dashboard.mdx new file mode 100644 index 000000000..837ae6e22 --- /dev/null +++ b/ai-management/ai-studio/dashboard.mdx @@ -0,0 +1,55 @@ +--- +title: "Dashboard Overview for Tyk AI Studio" +description: "Overview of dashboard in AI Studio?" +keywords: "AI Studio, AI Management, Dashboard" +--- + +This dashboard provides an overview of user engagement, cost analysis, and tool/model usage for the Tyk AI Studio. Below is a breakdown of the various elements and their significance: + +--- + +#### **Conversations** +This section monitors user interaction trends over a selected time period. + +- **Unique Users per Day**: + A line graph displaying the daily count of unique users engaging with the platform. This metric helps track active user trends. + +- **Chat Interactions per Day**: + A line graph showing the number of chat interactions per day. 
This reflects user activity levels and engagement with AI-powered chat features. + +--- + +#### **Cost Analysis** +This section offers insights into the expenditure associated with the usage of Large Language Models (LLMs). + +- **Cost Analysis by Currency**: + A line chart representing the cost incurred over time, broken down by currency. This visual allows for tracking fluctuations in platform expenditure. + +- **Total Cost per Vendor and Model**: + A table summarizing costs by vendor and model, including: + - **Vendor**: The provider of the LLM (e.g., Anthropic, OpenAI). + - **Model**: The specific LLM used (e.g., `claude-3.5`, `gpt-4.0`). + - **Total Cost**: The cumulative cost for the model. + - **Currency**: The unit of currency in which the cost is calculated (e.g., USD). + +--- + +#### **Model and Tool Usage** +This section highlights the utilization statistics for LLM models and tools within the platform. + +- **Most Used LLM Models**: + A bar chart showcasing the frequency of usage for different LLM models. This data provides insights into which models are most relied upon. + +- **Tool Usage Statistics**: + A bar chart detailing the frequency of tool usage across various functionalities, such as vector searches, sandbox environments, and data management. This helps identify popular tools and areas for optimization. + +--- + +#### **Additional Features** +- **Date Selector**: Located in the top-right corner, this allows users to define a custom date range for analyzing trends across the dashboard. + +- **Feedback Button**: A floating button labeled "Feedback" to enable users to report issues or provide suggestions regarding the dashboard. + +--- + +This dashboard serves as a comprehensive monitoring and analytical tool for administrators and users to evaluate platform performance, user engagement, and cost efficiency. 
diff --git a/ai-management/ai-studio/datasources-rag.mdx b/ai-management/ai-studio/datasources-rag.mdx new file mode 100644 index 000000000..344854fb5 --- /dev/null +++ b/ai-management/ai-studio/datasources-rag.mdx @@ -0,0 +1,200 @@ +--- +title: "Data Sources & RAG" +description: "How to integrate Data Sources & RAG in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Datasources, RAG" +sidebarTitle: "Data Sources & RAG" +--- + +Tyk AI Studio's Data Source system connects the platform to external knowledge bases, primarily vector stores, enabling **Retrieval-Augmented Generation (RAG)**. This allows Large Language Models (LLMs) to access and utilize specific information from your documents, grounding their responses in factual data. + +## Purpose + +The primary goal is to enhance LLM interactions by: + +* **Providing Context:** Injecting relevant information retrieved from configured data sources directly into the LLM prompt. +* **Improving Accuracy:** Reducing hallucinations and grounding LLM responses in specific, verifiable data. +* **Accessing Private Knowledge:** Allowing LLMs to leverage internal documentation, knowledge bases, or other proprietary information. + +## Core Concepts + +* **Data Source:** A configuration in Tyk AI Studio that defines a connection to a specific knowledge base (typically a vector store) and the associated embedding service used to populate it. +* **Vector Store Abstraction:** Tyk AI Studio provides a unified interface to interact with various vector database types (e.g., Pinecone, Milvus, ChromaDB). Administrators configure the connection details for their chosen store. +* **Embedding Service:** Text needs to be converted into numerical vector embeddings before being stored and searched. Administrators configure the embedding service (e.g., OpenAI `text-embedding-ada-002`, a local Sentence Transformer model via an API endpoint) and its credentials (using [Secrets Management](/ai-management/ai-studio/secrets)). 
+* **File Processing:** Administrators upload documents (e.g., PDF, TXT, DOCX) to a Data Source configuration. Tyk AI Studio automatically: + * Chunks the documents into smaller, manageable pieces. + * Uses the configured Embedding Service to convert each chunk into a vector embedding. + * Stores the text chunk and its corresponding embedding in the configured Vector Store. +* **RAG (Retrieval-Augmented Generation):** The core process where: + 1. A user's query in the [Chat Interface](/ai-management/ai-studio/chat-interface) is embedded using the same embedding service. + 2. This query embedding is used to search the relevant vector store(s) for the most similar text chunks (based on vector similarity). + 3. The retrieved text chunks are added as context to the prompt sent to the LLM. + 4. The LLM uses this context to generate a more informed and relevant response. +* **Data Source Catalogues:** Similar to Tools, Data Sources are grouped into Catalogues for easier management and assignment to teams. +* **Privacy Levels:** Each Data Source has a privacy level. It can only be used in RAG if its level is less than or equal to the privacy level of the [LLM Configuration](/ai-management/ai-studio/llm-management) being used, ensuring data governance. + + Privacy levels define how data is protected by controlling LLM access based on its sensitivity: + - Public – Safe to share (e.g., blogs, press releases). + - Internal – Company-only info (e.g., reports, policies). + - Confidential – Sensitive business data (e.g., financials, strategies). + - Restricted (PII) – Personal data (e.g., names, emails, customer info). + +## How RAG Works in the Chat Interface + +When RAG is enabled for a Chat Experience: + +1. User sends a prompt. +2. Tyk AI Studio embeds the user's prompt using the configured embedding service for the relevant Data Source(s). +3. Tyk AI Studio searches the configured Vector Store(s) using the prompt embedding to find relevant text chunks. +4. 
The retrieved chunks are formatted and added to the context window of the LLM prompt. +5. The combined prompt (original query + retrieved context) is sent to the LLM. +6. The LLM generates a response based on both the query and the provided context. +7. The response is streamed back to the user. + +## Creating & Managing Data Sources (Admin) + +Administrators configure Data Sources via the UI or API: + +1. **Define Data Source:** Provide a name, description, and privacy level. +2. **Configure Vector Store:** + * Select the database type (e.g., `pinecone`). + * Provide connection details (e.g., endpoint/connection string, namespace/index name). + * Reference a [Secret](/ai-management/ai-studio/secrets) containing the API key/credentials. +3. **Configure Embedding Service:** + * Select the vendor/type (e.g., `openai`, `local`). + * Specify the model name (if applicable). + * Provide the service URL (if applicable, for local models). + * Reference a [Secret](/ai-management/ai-studio/secrets) containing the API key (if applicable). +4. **Upload Files:** Upload documents to be chunked, embedded, and indexed into the vector store. + + Datasource Config + +## Organizing & Assigning Data Sources (Admin) + +* **Create Catalogues:** Group related Data Sources into Catalogues (e.g., "Product Docs", "Support KB"). +* **Assign to Groups:** Assign Data Source Catalogues to specific Teams. + + Catalogue Config + +## Using Data Sources (User) + +Data Sources are primarily used implicitly via RAG within the [Chat Interface](/ai-management/ai-studio/chat-interface). + +A Data Source will be used for RAG if: + +1. The specific Chat Experience configuration includes the relevant Data Source Catalogue. +2. The user belongs to a Team that has been assigned that Data Source Catalogue. +3. The Data Source's privacy level is compatible with the LLM being used. 
+ +## Programmatic Access via API + +Tyk AI Studio provides a direct API endpoint for querying configured Data Sources programmatically: + +### Datasource API Endpoint + +* **Endpoint:** `/datasource/{dsSlug}` (where `{dsSlug}` is the datasource identifier) +* **Method:** POST +* **Authentication:** Bearer token required in the Authorization header + +### Request Format + +```json +{ + "query": "your semantic search query here", + "n": 5 // optional, number of results to return (default: 3) +} +``` + +### Response Format + +```json +{ + "documents": [ + { + "content": "text content of the document chunk", + "metadata": { + "source": "filename.pdf", + "page": 42 + } + }, + // additional results... + ] +} +``` + +### Example Usage + +#### cURL + +```bash +curl -X POST "https://your-tyk-instance/datasource/product-docs" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"query": "How do I configure authentication?", "n": 3}' +``` + +#### Python + +```python +import requests + +url = "https://your-tyk-instance/datasource/product-docs" +headers = { + "Authorization": "Bearer YOUR_TOKEN", + "Content-Type": "application/json" +} +payload = { + "query": "How do I configure authentication?", + "n": 3 +} + +response = requests.post(url, json=payload, headers=headers) +results = response.json() + +for doc in results["documents"]: + print(f"Content: {doc['content']}") + print(f"Source: {doc['metadata']['source']}") + print("---") +``` + +#### JavaScript + +```javascript +async function queryDatasource() { + const response = await fetch('https://your-tyk-instance/datasource/product-docs', { + method: 'POST', + headers: { + 'Authorization': 'Bearer YOUR_TOKEN', + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + query: 'How do I configure authentication?', + n: 3 + }) + }); + + const data = await response.json(); + + data.documents.forEach(doc => { + console.log(`Content: ${doc.content}`); + console.log(`Source: 
${doc.metadata.source}`); + console.log('---'); + }); +} +``` + +### Common Issues and Troubleshooting + +1. **Trailing Slash Error:** The endpoint does not accept a trailing slash. Use `/datasource/{dsSlug}` and not `/datasource/{dsSlug}/`. + +2. **Authentication Errors:** Ensure your Bearer token is valid and has not expired. The token must have permissions to access the specified datasource. + +3. **404 Not Found:** Verify that the datasource slug is correct and that the datasource exists and is properly configured. + +4. **403 Forbidden:** Check that your user account has been granted access to the datasource catalogue containing this datasource. + +5. **Empty Results:** If you receive an empty documents array, try: + - Reformulating your query to better match the content + - Increasing the value of `n` to get more results + - Verifying that the datasource has been properly populated with documents + +This API endpoint allows developers to build custom applications that leverage the semantic search capabilities of configured vector stores without needing to implement the full RAG pipeline. diff --git a/ai-management/ai-studio/deployment-k8s.mdx b/ai-management/ai-studio/deployment-k8s.mdx new file mode 100644 index 000000000..3d016a71c --- /dev/null +++ b/ai-management/ai-studio/deployment-k8s.mdx @@ -0,0 +1,686 @@ +--- +title: "Installation (Kubernetes)" +description: "Install Tyk AI Studio" +keywords: "AI Studio, AI Management" +sidebarTitle: "Kubernetes" +--- + +This guide explains how to deploy Tyk AI Studio, a secure and extensible AI gateway, using pure Kubernetes manifests. 
+ +## Prerequisites + +- Kubernetes 1.16+ +- kubectl configured with access to your cluster +- A `TYK_AI_LICENSE` string from Tyk Technologies (contact support@tyk.io or your account manager to obtain) +- A securely generated `TYK_AI_SECRET_KEY` string for secrets encryption +- If using SSL/TLS: cert-manager installed in your cluster + +*Note: The following examples use placeholder values (e.g., `your-domain.com`, `your-secret-key`). Remember to replace these with your actual configuration values.* + +## Installation Options + +Tyk AI Studio can be deployed in several configurations: + +1. Local Development +2. Production without TLS +3. Production with TLS +4. Production with External Database + +### Option 1: Local Development Setup + +1. Create a `local-deployment.yaml` file: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: tyk-ai-studio +--- +apiVersion: v1 +kind: Secret +metadata: + name: tyk-ai-config + namespace: tyk-ai-studio +type: Opaque +stringData: + ALLOW_REGISTRATIONS: "true" + ADMIN_EMAIL: "admin@localhost" + SITE_URL: "http://localhost:32580" + FROM_EMAIL: "noreply@localhost" + DEV_MODE: "true" + DATABASE_TYPE: "postgres" + TYK_AI_SECRET_KEY: "your-secret-key" + TYK_AI_LICENSE: "your-license" + DATABASE_URL: "postgres://postgres:localdev123@postgres:5432/tyk-ai-studio" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres + namespace: tyk-ai-studio +spec: + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:13 + env: + - name: POSTGRES_DB + value: "tyk-ai-studio" + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_PASSWORD + value: "localdev123" + ports: + - containerPort: 5432 + volumeMounts: + - name: postgres-data + mountPath: /var/lib/postgresql/data + volumes: + - name: postgres-data + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: tyk-ai-studio 
+spec: + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tyk-ai-studio + namespace: tyk-ai-studio +spec: + replicas: 1 + selector: + matchLabels: + app: tyk-ai-studio + template: + metadata: + labels: + app: tyk-ai-studio + spec: + containers: + - name: ai-studio + image: tykio/tyk-ai-studio:latest + envFrom: + - secretRef: + name: tyk-ai-config + ports: + - containerPort: 8080 + - containerPort: 9090 +--- +apiVersion: v1 +kind: Service +metadata: + name: tyk-ai-studio + namespace: tyk-ai-studio +spec: + type: NodePort + selector: + app: tyk-ai-studio + ports: + - name: http + port: 8080 + targetPort: 8080 + nodePort: 32580 + - name: gateway + port: 9090 + targetPort: 9090 + nodePort: 32590 +``` + +2. Deploy the application: + +```bash +kubectl apply -f local-deployment.yaml +``` + +3. Access the application: +- Web Interface: http://localhost:32580 +- Gateway: http://localhost:32590 + +### Option 2: Production without TLS + +For a production deployment without TLS certificates: + +1. 
Create `production-no-tls.yaml`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: tyk-ai-studio +--- +apiVersion: v1 +kind: Secret +metadata: + name: tyk-ai-config + namespace: tyk-ai-studio +type: Opaque +stringData: + ALLOW_REGISTRATIONS: "true" + ADMIN_EMAIL: "admin@yourdomain.com" + SITE_URL: "http://app.yourdomain.com" + FROM_EMAIL: "noreply@yourdomain.com" + DEV_MODE: "false" + DATABASE_TYPE: "postgres" + TYK_AI_SECRET_KEY: "your-production-key" + TYK_AI_LICENSE: "your-production-license" + DATABASE_URL: "postgres://your-db-user:your-db-password@your-db-host:5432/tyk-ai-studio" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tyk-ai-studio + namespace: tyk-ai-studio +spec: + replicas: 2 + selector: + matchLabels: + app: tyk-ai-studio + template: + metadata: + labels: + app: tyk-ai-studio + spec: + containers: + - name: ai-studio + image: tykio/tyk-ai-studio:latest + envFrom: + - secretRef: + name: tyk-ai-config + ports: + - containerPort: 8080 + - containerPort: 9090 + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 1000m + memory: 2Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: tyk-ai-studio + namespace: tyk-ai-studio +spec: + selector: + app: tyk-ai-studio + ports: + - name: http + port: 8080 + targetPort: 8080 + - name: gateway + port: 9090 + targetPort: 9090 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tyk-ai-studio-ingress + namespace: tyk-ai-studio + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: app.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: tyk-ai-studio + port: + number: 8080 + - host: gateway.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: tyk-ai-studio + port: + number: 9090 +``` + +2. Deploy: + +```bash +kubectl apply -f production-no-tls.yaml +``` + +### Option 3: Production with TLS + +For a secure production deployment with TLS: + +1. 
Create `production-tls.yaml`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: tyk-ai-studio +--- +apiVersion: v1 +kind: Secret +metadata: + name: tyk-ai-config + namespace: tyk-ai-studio +type: Opaque +stringData: + ALLOW_REGISTRATIONS: "true" + ADMIN_EMAIL: "admin@yourdomain.com" + SITE_URL: "https://app.yourdomain.com" + FROM_EMAIL: "noreply@yourdomain.com" + DEV_MODE: "false" + DATABASE_TYPE: "postgres" + TYK_AI_SECRET_KEY: "your-production-key" + TYK_AI_LICENSE: "your-production-license" + DATABASE_URL: "postgres://user:password@your-production-db:5432/tyk-ai-studio" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tyk-ai-studio + namespace: tyk-ai-studio +spec: + replicas: 2 + selector: + matchLabels: + app: tyk-ai-studio + template: + metadata: + labels: + app: tyk-ai-studio + spec: + containers: + - name: ai-studio + image: tykio/tyk-ai-studio:latest + envFrom: + - secretRef: + name: tyk-ai-config + ports: + - containerPort: 8080 + - containerPort: 9090 + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 1000m + memory: 2Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: tyk-ai-studio + namespace: tyk-ai-studio +spec: + selector: + app: tyk-ai-studio + ports: + - name: http + port: 8080 + targetPort: 8080 + - name: gateway + port: 9090 + targetPort: 9090 +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: app-tls-certificate + namespace: tyk-ai-studio +spec: + secretName: app-tls-secret + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - app.yourdomain.com +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: gateway-tls-certificate + namespace: tyk-ai-studio +spec: + secretName: gateway-tls-secret + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - gateway.yourdomain.com +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tyk-ai-studio-ingress + namespace: tyk-ai-studio + annotations: + 
kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + tls: + - hosts: + - app.yourdomain.com + secretName: app-tls-secret + - hosts: + - gateway.yourdomain.com + secretName: gateway-tls-secret + rules: + - host: app.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: tyk-ai-studio + port: + number: 8080 + - host: gateway.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: tyk-ai-studio + port: + number: 9090 +``` + +2. Deploy: + +```bash +kubectl apply -f production-tls.yaml +``` + +## Optional Components + +### Reranker Service + +The Reranker service improves RAG result relevance. Add it to your deployment: + +```yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reranker + namespace: tyk-ai-studio +spec: + replicas: 1 + selector: + matchLabels: + app: reranker + template: + metadata: + labels: + app: reranker + spec: + containers: + - name: reranker + image: tykio/reranker_cpu:latest + ports: + - containerPort: 8080 + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 1000m + memory: 2Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: reranker + namespace: tyk-ai-studio +spec: + selector: + app: reranker + ports: + - port: 8080 + targetPort: 8080 +``` + +### Transformer Server + +The Transformer Server handles embedding generation and model inference. 
Add it to your deployment: + +```yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: transformer-server + namespace: tyk-ai-studio +spec: + replicas: 1 + selector: + matchLabels: + app: transformer-server + template: + metadata: + labels: + app: transformer-server + spec: + containers: + - name: transformer-server + image: tykio/transformer_server_cpu:latest + ports: + - containerPort: 8080 + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 1000m + memory: 2Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: transformer-server + namespace: tyk-ai-studio +spec: + selector: + app: transformer-server + ports: + - port: 8080 + targetPort: 8080 +``` + +## Database Options + +### Using Internal PostgreSQL + +For development or small deployments, you can deploy PostgreSQL within your cluster: + +```yaml +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc + namespace: tyk-ai-studio +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres + namespace: tyk-ai-studio +spec: + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:13 + env: + - name: POSTGRES_DB + value: "tyk-ai-studio" + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_PASSWORD + value: "secure-password" + ports: + - containerPort: 5432 + volumeMounts: + - name: postgres-storage + mountPath: /var/lib/postgresql/data + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 500m + memory: 1Gi + volumes: + - name: postgres-storage + persistentVolumeClaim: + claimName: postgres-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: tyk-ai-studio +spec: + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 +``` + +### Using External Database + +For 
production environments, configure your external database connection in the Secret: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: tyk-ai-config + namespace: tyk-ai-studio +type: Opaque +stringData: + DATABASE_URL: "postgres://user:password@your-db-host:5432/tyk-ai-studio" + # ... other config values +``` + +## Maintenance + +### Upgrading + +To upgrade an existing installation: + +```bash +# Update the deployment with new configuration +kubectl apply -f your-deployment.yaml + +# Or update just the image +kubectl set image deployment/tyk-ai-studio ai-studio=tykio/tyk-ai-studio:new-version -n tyk-ai-studio +``` + +### Uninstalling + +To remove the deployment: + +```bash +# Delete all resources in the namespace +kubectl delete namespace tyk-ai-studio + +# Or delete specific resources +kubectl delete -f your-deployment.yaml +``` + +### Viewing Logs + +```bash +# Main application logs +kubectl logs -l app=tyk-ai-studio -n tyk-ai-studio + +# Database logs (if using internal database) +kubectl logs -l app=postgres -n tyk-ai-studio + +# Optional component logs +kubectl logs -l app=reranker -n tyk-ai-studio +kubectl logs -l app=transformer-server -n tyk-ai-studio +``` + +## Troubleshooting + +1. Check pod status: +```bash +kubectl get pods -n tyk-ai-studio +``` + +2. Check ingress configuration: +```bash +kubectl get ingress -n tyk-ai-studio +``` + +3. View pod details: +```bash +kubectl describe pod <pod-name> -n tyk-ai-studio +``` + +4. Common issues: +- Database connection failures: Check credentials and network access +- Ingress not working: Verify DNS records and TLS configuration +- Resource constraints: Check pod resource limits and node capacity + +## First User Registration + +After deployment, you need to create your first admin user: + +1. **Access the application**: Navigate to your configured `SITE_URL` (e.g., `https://app.yourdomain.com`) +2. **Register with admin email**: Use the EXACT email address you set in the `ADMIN_EMAIL` environment variable in your Secret +3. 
**Complete registration**: The first user who registers with the admin email will automatically become the administrator + + + + + **Important**: The first user registration must use the same email address specified in the `ADMIN_EMAIL` environment variable. This user will have full administrative privileges. + + + +## Next Steps + +Once deployed and you've registered your first user, proceed to the [First Steps guide](/ai-management/ai-studio/configuration) to configure Tyk AI Studio. diff --git a/ai-management/ai-studio/filters.mdx b/ai-management/ai-studio/filters.mdx new file mode 100644 index 000000000..8cfc93308 --- /dev/null +++ b/ai-management/ai-studio/filters.mdx @@ -0,0 +1,284 @@ +--- +title: "Filters and Middleware" +description: "How to use Filters and Middleware in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Filters, Middleware" +sidebarTitle: "Filters & Policies" +--- + +The **Filters List View** allows administrators to manage filters and middleware applied to prompts or data sent to Large Language Models (LLMs) via the AI Gateway or Chat Rooms. Filters and middleware ensure data governance, compliance, and security by processing or controlling the flow of information. Below is an enhanced description with the distinction between **Filters** and **Middleware**: + +--- + +#### **Filters vs. Middleware** + +1. **Filters**: + - **Purpose**: Filters act as governance blocks that either approve or deny a prompt before it reaches the upstream LLM. + - **Behavior**: + - Filters **do not modify the prompt**. + - They analyze the contents of the input prompt to decide if it complies with organizational policies or contains restricted content. + - Example: A PII detector that blocks prompts containing sensitive information. + +2. **Middleware**: + - **Purpose**: Middleware processes prompts or outputs generated by tools, modifying them before they are passed on to the LLM. 
+ - **Behavior**: + - Middleware **modifies the prompt or output** to enhance security, anonymize data, or perform other transformations. + - Middleware only works with tools (e.g., API-based services) and is not used directly with raw input prompts. + - Example: An anonymizer that removes Personally Identifiable Information (PII) from tool outputs. + +--- + +#### **Table Overview** + +1. **Name**: + - The name of the filter or middleware (e.g., `Anonymize PII (LLM)`, `Fixed PII Filter`). + +2. **Description**: + - A brief summary of the filter or middleware's functionality (e.g., "Uses Regex to remove obvious PII"). + +3. **Actions**: + - A menu (three-dot icon) that allows administrators to: + - Edit the filter or middleware. + - Delete the filter or middleware. + +--- + +#### **Features** + +1. **Add Filter Button**: + - A green button labeled **+ ADD FILTER**, located in the top-right corner. Clicking this button opens a form to create a new filter or middleware. + +2. **Pagination Dropdown**: + - Located at the bottom-left corner, this control allows administrators to adjust the number of entries displayed per page. + +--- + +#### **Examples of Filters and Middleware** + +- **Filters**: + - **PII Detector**: A regex-based filter that blocks prompts containing sensitive PII. + - **JIRA Field Analysis**: Ensures no PII is included in data retrieved from JIRA fields before passing to the LLM. + +- **Middleware**: + - **Anonymize PII (LLM)**: Uses an LLM to anonymize sensitive data before sending it downstream. + - **NER Service Filter**: A Named Entity Recognition (NER) microservice that modifies outputs to remove identified entities. + +--- + +#### **Use Cases** + +1. **Prompt Validation with Filters**: + - Ensures that only compliant and secure prompts are sent to LLMs. + - Example: Blocking a prompt with sensitive data that should not be processed by an unapproved vendor. + +2. 
**Data Preprocessing with Middleware**: + - Prepares data from tools or external sources for safe interaction with LLMs by modifying or anonymizing content. + - Example: Removing sensitive ticket details from a JIRA query before sending to an LLM. + +3. **Organizational Security**: + - Both filters and middleware ensure sensitive information is protected and handled in line with organizational governance policies. + +4. **Enhanced Tool Interactions**: + - Middleware supports tools by transforming their outputs, enabling richer and safer LLM interactions. + +--- + +#### **Key Benefits** + +1. **Improved Data Governance**: + - Filters and middleware work together to enforce strict controls over data flow, protecting sensitive information. + +2. **Flexibility**: + - Middleware allows for data transformation, enhancing interoperability between tools and LLMs. + - Filters ensure compliance without altering user-provided prompts. + +3. **Compliance and Security**: + - Prevent unauthorized or sensitive data from reaching unapproved vendors, ensuring regulatory compliance. + +This detailed structure for **Filters and Middleware** provides organizations with robust governance tools to secure and optimize data workflows in the Tyk AI Studio. + +### Filter Edit View (and example Filter) + +The **Filter Edit View** enables administrators to create or modify filters using the **Tengo scripting language**. Filters serve as governance tools that analyze input data (e.g., prompts or files) and decide whether the content is permitted to pass to the upstream LLM. In this example, the filter uses regular expressions (regex) to detect Personally Identifiable Information (PII) and blocks the prompt if any matches are found. + +--- + +#### **Form Sections and Fields** + +1. **Name** *(Required)*: + - Specifies the name of the filter (e.g., `PII Detector`). + +2. 
**Description** *(Optional)*: + - A brief summary of the filter's purpose and functionality (e.g., "Simple Regex-based PII detector to prevent the wrong data being sent to LLMs"). + +3. **Script** *(Required)*: + - A **Tengo script** that defines the logic of the filter. The script evaluates input data and determines whether the filter approves or blocks it. + - The example script detects PII using a collection of regex patterns and blocks the data if a match is found. + +--- + +#### **Example Script** + +This script demonstrates a regex-based PII detection filter: + +```tengo +text := import("text") + +// regexes for various PII +patterns := { + "email": `[\w\.-]+@[\w\.-]+\.\w+`, + "phone": `\+?\d{1,3}?[-.\s]?\(?\d{1,4}?\)?[-.\s]?\d{1,4}[-.\s]?\d{1,9}`, + "ssn": `\b\d{3}-\d{2}-\d{4}\b`, + "credit_card": `\b(?:\d[ -]*?){13,16}\b`, + "ipv4": `\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b`, + "ipv6": `\b([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}\b`, + "dob": `\b\d{1,2}[/-]\d{1,2}[/-]\d{2,4}\b`, + "address": `\d+\s[A-Za-z]+\s[A-Za-z]+`, + "passport": `\b[A-PR-WYa-pr-wy][1-9]\d\s?\d{4}[1-9]\b`, + "drivers_license": `\b[A-Z]{1,2}\d{1,7}\b`, + "bank_account": `\b\d{8,17}\b` +} + +/* +payload := ` +John Doe, born on 12/25/1980, +resides at 1234 Elm Street, Springfield, IL 62704. +You can contact him via email at john.doe@example.com or +by phone at +1-555-123-4567. His Social Security Number is 123-45-6789, +and his U.S. passport number is 987654321. John's driver's license +number is D1234567, and his bank account number is 123456789012. +He often uses the IP address 192.168.1.1 to access his online banking. +` +*/ + +filter := func(payload) { + for key, pattern in patterns { + found := text.re_match(pattern, payload) + if found { + return false + } + } + + return true +} + +result := filter(payload) +``` + +--- + +#### **Key Features of the Script** + +1. 
**Patterns Dictionary**: + - Defines regex patterns for detecting specific PII types (e.g., email, phone, SSN, IP addresses, etc.). + +2. **Filter Function**: + - Iterates through the patterns and checks if the input payload matches any of them. + - If a match is found, the filter blocks the input (`return false`). + +3. **Usage Context**: + - This filter can be applied to prompts, files, or any other input to ensure that sensitive information is not unintentionally shared with an LLM. + +--- + +#### **Action Buttons** +1. **Update Filter / Create Filter**: + - Saves the filter configuration, making it active for future data processing. + +2. **Back to Filters**: + - Returns to the Filters List View without saving changes. + +--- + +#### **Purpose and Benefits** + +1. **Data Governance**: + - Enforces strict control over what data can be sent to LLMs, ensuring compliance with privacy regulations. + +2. **Flexibility**: + - Filters can be tailored to specific organizational needs using custom scripts. + +3. **Security**: + - Prevents sensitive or unauthorized data from leaking to unapproved vendors or external systems. + +This **Filter Edit View** provides a robust and customizable interface for creating scripts to enforce data governance and security in the Tyk AI Studio. + +### Example Middleware for Tools + +Middleware filters in the Tyk AI Studio modify data coming from tools before passing it to the LLM. These filters are applied to sanitize, anonymize, or enhance the data to ensure it complies with organizational standards and privacy regulations. Below is an example of a middleware filter that sanitizes Personally Identifiable Information (PII), specifically email addresses, from the tool's output. 
+ +--- + +#### **Middleware Script: Email Redaction Example** + +```tengo +// Import the 'text' module for regular expression operations +text := import("text") + +// Define regular expression patterns for various PII +email_pattern := `[\w\.-]+@[\w\.-]+\.\w+` + +// Define the function to sanitize PII in the input string +filter := func(input) { + // Replace email addresses + input = text.re_replace(email_pattern, input, "[REDACTED EMAIL]") + + return input +} + +// Process the input payload +result := filter(payload) +``` + +--- + +#### **Explanation of the Script** + +1. **Module Import**: + - The `text` module is imported to enable regular expression operations (`text.re_replace`). + +2. **Regex Pattern**: + - A regex pattern is defined to detect email addresses: + - Example pattern: `[\w\.-]+@[\w\.-]+\.\w+` + - This pattern matches standard email formats. + +3. **Filter Function**: + - The `filter` function accepts an input string (e.g., tool output) and: + - Uses `text.re_replace` to identify email addresses. + - Replaces detected email addresses with `[REDACTED EMAIL]`. + +4. **Return Processed Output**: + - The sanitized output is returned, ensuring that sensitive information like email addresses is redacted before reaching the LLM. + +--- + +#### **Use Case for Middleware** + +**Tool Example**: +Imagine a tool, such as `Support Ticket Viewer`, which retrieves user tickets from a system. These tickets often contain email addresses. Middleware ensures that no sensitive email information is included in the output sent to the LLM. + +- **Input Payload Example**: + ```text + User email: john.doe@example.com has reported an issue with their account. + ``` + +- **Sanitized Output**: + ```text + User email: [REDACTED EMAIL] has reported an issue with their account. + ``` + +--- + +#### **Benefits of Middleware** + +1. **Data Privacy**: + - Protects sensitive user information by ensuring it is sanitized before being sent to external systems. + +2. 
**Compliance**: + - Ensures organizational adherence to privacy laws like GDPR or HIPAA. + +3. **Enhanced Security**: + - Prevents accidental sharing of PII with external vendors or LLMs. + +--- + +This middleware example demonstrates how flexible and powerful Tyk's scripting capabilities are, enabling administrators to enforce strict data governance policies while supporting advanced LLM and tool integration workflows. diff --git a/ai-management/ai-studio/llm-management.mdx b/ai-management/ai-studio/llm-management.mdx new file mode 100644 index 000000000..1219c1512 --- /dev/null +++ b/ai-management/ai-studio/llm-management.mdx @@ -0,0 +1,89 @@ +--- +title: "LLM Management" +description: "How to manage LLMs in Tyk AI Studio?" +keywords: "AI Studio, AI Management, LLM Management" +sidebarTitle: "LLM Management" +--- + +Tyk AI Studio provides a centralized system for managing Large Language Model (LLM) providers, models, associated costs, and usage budgets. This allows administrators to control which models are available, how they are used, and track associated expenses. + +## Overview + +The LLM Management system allows you to: + +* **Configure LLM Providers:** Connect to various LLM vendors (OpenAI, Anthropic, Azure OpenAI, Google Vertex AI, etc.). +* **Manage Models:** Specify which models from a provider are available for use within Tyk AI Studio. +* **Define Pricing:** Set input and output token costs for each model to enable accurate cost tracking. +* **Set Budgets:** Establish monthly spending limits for LLM usage, either globally for a model or per Application. +* **Control Access:** Determine which teams can access specific LLM configurations (via associated Apps). + +## Configuring LLM Providers + +Administrators can configure connections to different LLM providers through the UI or API. + +1. **Navigate:** Go to the LLM Configuration section in the Admin UI. +2. **Add New LLM:** Click "Add LLM Configuration". +3. 
**Provider Details:** + * **Name:** A user-friendly name for this configuration (e.g., "OpenAI GPT-4 Turbo"). + * **Vendor:** Select the LLM vendor (e.g., `openai`, `anthropic`, `azure`, `vertex`). + * **API Key/Credentials:** Securely provide the necessary authentication credentials. Use the **Secrets Management** system (`$SECRET/YourSecretName`) for best practice. + * **Base URL (Optional):** Override the default API endpoint if needed (e.g., for Azure OpenAI). + * **API Version (Optional):** Specify the API version for certain providers like Azure. + + LLM Provider Config + +4. **Model Selection:** + * **Allowed Models:** Specify the exact model names from the vendor that can be used via this configuration (e.g., `gpt-4-turbo`, `claude-3-opus-20240229`). + * **Default Model:** The model used if a request doesn't specify one. + +5. **Route ID:** A unique identifier used in API paths (e.g., `/proxy/{routeId}/...` or `/openai/{routeId}/...`) to target this specific LLM configuration. + +6. **Privacy Level:** Assign a privacy level to the LLM. This interacts with the Tool system, preventing tools with higher privacy levels from being used with LLMs having lower levels. + + Privacy levels define how data is protected by controlling LLM access based on its sensitivity: + - Public – Safe to share (e.g., blogs, press releases). + - Internal – Company-only info (e.g., reports, policies). + - Confidential – Sensitive business data (e.g., financials, strategies). + - Restricted (PII) – Personal data (e.g., names, emails, customer info). + +7. **Save:** Save the configuration. + +## Model Pricing + +To enable cost tracking in the Analytics system, you need to define the price per token for each model. + +1. **Navigate:** Go to the Model Prices section in the Admin UI. +2. **Add Price:** Define prices for specific models. + * **Vendor:** Select the vendor. + * **Model Name:** Enter the exact model name. 
+ * **Input Token Price:** Cost per input token (usually stored as integer * 10000 for precision). + * **Output Token Price:** Cost per output token (usually stored as integer * 10000 for precision). + + Model Price Config + +3. **Save:** Save the pricing information. + +The Analytics system uses these prices along with token counts from LLM interactions (recorded by the Proxy and Chat systems) to calculate usage costs. + +## Budget Control + +Tyk AI Studio allows setting monthly spending limits to control AI costs. + +* **LLM Budget:** A global monthly budget can be set directly on an LLM configuration. This limits the total spending across *all* applications using that specific LLM configuration. +* **Application Budget:** A monthly budget can be set on an Application (`Apps` section). This limits the spending *for that specific application*, potentially across multiple LLM configurations it might use. + +**How it Works:** + +1. Budgets are checked *before* an LLM request is forwarded by the Proxy. +2. The system calculates the current monthly spending for the relevant entity (LLM or App) based on data from the Analytics system. +3. If the current spending plus the estimated cost of the *incoming* request (if calculable, otherwise based on past usage) exceeds the budget, the request is blocked (e.g., 429 Too Many Requests). +4. The **Notification System** can be configured to send alerts when budget thresholds (e.g., 80%, 100%) are reached. + +**Configuration:** + +* **LLM Budget:** Set the `MonthlyBudget` field when creating/editing an LLM configuration. +* **App Budget:** Set the `MonthlyBudget` field when creating/editing an App configuration. + + Budget Config + +By combining LLM configuration, pricing, and budgeting, administrators gain granular control over AI model access and expenditure within Tyk AI Studio. 
diff --git a/ai-management/ai-studio/llms.mdx b/ai-management/ai-studio/llms.mdx new file mode 100644 index 000000000..04e697ca8 --- /dev/null +++ b/ai-management/ai-studio/llms.mdx @@ -0,0 +1,132 @@ +--- +title: "Large Language Models (LLMs)" +description: "How to configure LLMs in AI Studio?" +keywords: "AI Studio, AI Management, LLMs, Large Language Models" +--- + +The **LLMs View** provides administrators with an overview of the Large Language Model (LLM) vendors integrated into the portal. This section allows for managing the LLMs available for use in Chat Room features and the AI Gateway. Below is a breakdown of the features and data displayed: + +--- + +#### **Table Overview** +The table displays the following columns: + +1. **Name**: + The name of the LLM vendor or model (e.g., Anthropic, OpenAI GPT-4o, VLLM). + +2. **Short Description**: + A brief overview of the LLM, highlighting its features, strengths, and recommended use cases. + - Example: "Anthropic's flagship LLM: Claude is known for its excellent support for code-related tasks and code generation." + +3. **Vendor**: + The name of the LLM vendor providing the model (e.g., Anthropic, OpenAI). + +4. **Privacy Level**: + Indicates the model's privacy capability. Privacy levels define how data is protected by controlling LLM access based on its sensitivity. LLM providers with lower privacy levels can't access higher-level data sources and tools, ensuring secure and appropriate data handling. + + The system works with 4 privacy levels from low to high: + - Public – Safe to share (e.g., blogs, press releases). + - Internal – Company-only info (e.g., reports, policies). + - Confidential – Sensitive business data (e.g., financials, strategies). + - Restricted (PII) – Personal data (e.g., names, emails, customer info). + +5. **Proxied**: + A status indicator showing whether the LLM is proxied through the AI Gateway for added security and control. + - **Green dot**: Proxied. + - **Red dot**: Not proxied. 
+ +6. **Actions**: + A menu (three-dot icon) that allows administrators to perform specific actions on the LLM, such as editing its details, managing configurations, or removing it from the portal. + +--- + +#### **Features** +1. **Add LLM Button**: + A green button in the top-right corner labeled **+ ADD LLM**. Clicking this button opens a form to integrate a new LLM vendor or model into the portal. + +2. **Pagination Dropdown**: + Found at the bottom-left corner, this dropdown allows users to control how many LLMs are displayed per page. + +--- + +#### **Use Cases** +- **Chat Room Features**: + LLMs integrated into the Chat Room provide users with easy access to conversational AI for various purposes, such as general inquiries or task automation. + +- **AI Gateway**: + LLMs configured for the AI Gateway are used to route requests securely, offering fine-grained access control and privacy protections for API calls. + +--- + +### Edit/Create LLM Vendor View + +The **Edit/Create LLM Vendor View** allows administrators to configure or update the details of a Large Language Model (LLM) vendor. This form supports adding new LLMs or modifying existing ones for use in the Chat Room features and AI Gateway. Below is a detailed breakdown of the form fields and their functionality: + +--- + +#### **Form Sections and Fields** + +##### **LLM Description** +1. **Name** *(Required)*: + The name of the LLM vendor (e.g., "Anthropic"). This field identifies the LLM within the portal. + +2. **Short Description** *(Optional)*: + A brief summary of the LLM's capabilities, highlighting its key features (e.g., "Claude is known for its excellent support for code-related tasks and code generation."). + +3. **Long Description** *(Optional)*: + A detailed explanation of the LLM, including its history, technical details, and specific use cases. This provides deeper insight into the model for administrators and users. + +4. **Vendor** *(Dropdown)*: + The name of the vendor offering the LLM. 
This dropdown lists pre-configured vendors (e.g., Anthropic, OpenAI). + +5. **Default Model** *(Required)*: + The specific model to use by default for this LLM (e.g., "claude-3.5-sonnet-20240620"). Administrators can specify the exact model version. + +6. **Privacy Level** *(Optional)*: + The privacy capability of the LLM. Select from four levels (Public, Internal, Confidential, Restricted) to indicate what sensitivity of data the LLM can handle. + +--- + +##### **Access Details** +This section defines the API-related details required for integrating the LLM. + +1. **API Endpoint** *(Required for Gateway Integration)*: + The URL used to send requests to the LLM. This is necessary for enabling the LLM in the AI Gateway. + +2. **API Key** *(Optional)*: + A secure key for authenticating with the LLM's API. + - **View/Hide Toggle**: Allows administrators to toggle between showing and hiding the key for security purposes. + +--- + +##### **Portal Display Information** +Settings that determine how the LLM appears in the portal for end-users and developers. + +1. **Logo URL** *(Optional)*: + A link to an image that represents the LLM vendor. This logo is displayed in the portal's user interface. + +2. **Enabled in Proxy** *(Toggle)*: + Determines whether the LLM is proxied through the AI Gateway. + - **Enabled**: Routes traffic securely through the gateway. + - **Disabled**: Direct access to the LLM's API. + +--- + +##### **Filters** +- **Filters** *(Optional)*: + A field to add or manage filters that are executed in the AI Gateway when a request flows through the REST endpoint. + - Example: Adding preprocessing steps to modify user queries before they reach the LLM. + +--- + +#### **Action Buttons** +1. **Update LLM / Create LLM**: + A button at the bottom of the form that saves the changes or creates the new LLM vendor. This button is active only when all required fields are completed. + +2. 
**Back to LLMs**: + A link at the top-right corner that navigates back to the LLMs List View without saving changes. + +--- + +#### **Purpose** +This view serves as a centralized interface for managing LLM integrations. It ensures administrators have the flexibility to configure details, enable secure access, and define how LLMs are presented and used within the portal. diff --git a/ai-management/ai-studio/model-prices.mdx b/ai-management/ai-studio/model-prices.mdx new file mode 100644 index 000000000..a05ef0093 --- /dev/null +++ b/ai-management/ai-studio/model-prices.mdx @@ -0,0 +1,109 @@ +--- +title: "Model Prices View for Tyk AI Studio" +description: "How to manage model prices in AI Studio?" +keywords: "AI Studio, AI Management, Model Price" +--- + +The **Model Prices View** allows administrators to manage and track the cost associated with Large Language Model (LLM) usage. This section is essential for monitoring expenses and setting pricing for LLM use within the AI Gateway and Chat Room features. Below is a detailed breakdown of the table and its features: + +--- + +#### **Table Overview** +The table displays the following columns: + +1. **Model Name**: + - The name of the LLM model for which pricing is defined (e.g., `claude-3.5-sonnet-20240620`, `gpt-4o`). + +2. **Vendor**: + - The organization providing the LLM (e.g., Anthropic, OpenAI). + +3. **Cost per Input Token**: + - The price charged for each input token processed by the LLM. + - Example: A value of `0.000001` represents the cost in the specified currency per token. + +4. **Cost per Output Token**: + - The price charged for each output token generated by the LLM. + - Example: A value of `0.000015` reflects the token generation cost in the specified currency. + +5. **Currency**: + - The currency in which the model's pricing is defined (e.g., USD). + +6. **Actions**: + - A menu (three-dot icon) with quick actions for each model, allowing administrators to: + - Edit the pricing. 
+ - Delete the pricing configuration. + +--- + +#### **Features** + +1. **Add Model Price Button**: + - A green button labeled **+ ADD MODEL PRICE**, located in the top-right corner. Clicking this button opens a form to configure the pricing for a new LLM model. + +2. **Pagination Dropdown**: + - Located at the bottom-left corner, this control allows users to adjust how many pricing entries are displayed per page. + +--- + +#### **Use Cases** +- **AI Gateway Tracking**: + - Ensures that token-based costs for API calls through the AI Gateway are accurately logged and monitored. + +- **Chat Room Cost Analysis**: + - Tracks and evaluates expenses associated with user interactions in the Chat Room feature. + +--- + +#### **Quick Insights** +This section simplifies the process of tracking and updating LLM pricing, ensuring transparency and control over usage costs. The ability to edit or add prices directly from this interface provides flexibility for managing vendor costs dynamically as new models or pricing structures are introduced. + +### Edit/Create Model Prices + +The **Edit/Create Model Price Form** allows administrators to define or update the cost structure for a specific Large Language Model (LLM). The pricing information configured here is used for cost tracking and billing purposes and must align with the model names used in API calls or the model settings screen. + +--- + +#### **Form Fields and Descriptions** + +1. **Model Name** *(Required)*: + - The exact name of the model this price configuration applies to (e.g., `claude-3.5-sonnet-20240620`). + - **Important**: This name must match the name used in client API calls or the LLM settings in the portal for correct mapping. + +2. **Vendor** *(Dropdown)*: + - The name of the LLM provider (e.g., Anthropic, OpenAI). + - Selectable from pre-configured vendors in the portal. + +3. **Cost per Input Token** *(Decimal, Required)*: + - The price charged per input token sent to the LLM. 
+ - Example: A value of `0.000001` indicates the cost per input token in the specified currency. + +4. **Cost per Output Token** *(Decimal, Required)*: + - The price charged per output token generated by the LLM. + - Example: A value of `0.000015` specifies the output token cost in the specified currency. + +5. **Currency** *(Text, Required)*: + - The currency in which the pricing is defined (e.g., USD). + - Example: Use "USD" for United States Dollar pricing. + +--- + +#### **Action Buttons** +1. **Update Model Price / Create Model Price**: + - A green button at the bottom of the form that saves the price configuration. It updates an existing price or creates a new one depending on the context. + +2. **Back to Model Prices**: + - A link in the top-right corner to return to the **Model Prices View** without saving changes. + +--- + +#### **Usage and Purpose** +- **Price Tracking**: + - Ensures token-level costs are recorded for API calls and Chat Room interactions. + - Provides transparency for usage billing. + +- **Integration Consistency**: + - Ensures that the configured pricing aligns with the exact model names used in client interactions or other system settings to avoid mismatches. + +--- + +This form is essential for cost management and aligns LLM usage with its associated financial metrics, providing a streamlined approach to managing expenses in the portal. diff --git a/ai-management/ai-studio/notifications.mdx b/ai-management/ai-studio/notifications.mdx new file mode 100644 index 000000000..9a4e8ddf0 --- /dev/null +++ b/ai-management/ai-studio/notifications.mdx @@ -0,0 +1,66 @@ +--- +title: "Notifications" +description: "How to configure notifications in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Notifications" +sidebarTitle: "Notifications" +--- + +Tyk AI Studio includes a centralized Notification System responsible for generating and delivering alerts and messages to users and administrators based on specific system events. 
+ +## Purpose + +The Notification System aims to: + +* **Inform Stakeholders:** Keep users and administrators aware of important events or required actions. +* **Enable Proactive Management:** Alert administrators to potential issues or thresholds being reached (e.g., budget limits). +* **Improve User Experience:** Provide timely feedback on asynchronous processes or user-related events. + +## Key Features + +* **Event-Driven:** Notifications are triggered by specific occurrences within the Tyk AI Studio platform. +* **Configurable Channels:** Supports multiple delivery methods, primarily: + * **Email:** Sending notifications to registered user email addresses. + * **In-App Notifications:** Displaying messages directly within the Tyk AI Studio UI. +* **User Preferences:** Allows users (and potentially administrators) to configure which notifications they wish to receive and via which channels (where applicable). +* **Centralized Logic:** Provides a single system for managing notification templates and delivery rules. + +## Common Notification Triggers + +Examples of events that might trigger notifications include: + +* **[Budget Control](/ai-management/ai-studio/llm-management):** + * Approaching spending limit threshold (e.g., 80% of budget). + * Reaching or exceeding spending limit. +* **[User Management](/ai-management/ai-studio/user-management):** + * New user registration/invitation. + * Password reset request. + * Changes in user roles or group memberships. +* **System Health & Errors:** + * Significant system errors or failures. + * Service degradation alerts. +* **Security Events:** + * Suspicious login activity (if monitored). + * Changes to critical security settings. + +## Configuration + +* **System-Level (Admin):** Administrators typically configure the core settings for the notification system, such as: + * Email server (SMTP) details for sending emails. + * Default notification templates. 
+ * Enabling/disabling specific system-wide notification types. +* **User-Level:** Users can often manage their notification preferences in their profile settings: + * Opt-in/opt-out of specific notification categories. + * Choose preferred delivery channels (e.g., receive budget alerts via email). + + Notification Prefs UI + +## Integration + +The Notification System integrates with various other Tyk AI Studio components that generate relevant events, including: + +* Budget Control System +* User Management System +* Analytics System (potentially for performance alerts) +* Proxy/Gateway (for error or security event alerts) + +This system ensures timely communication, helping users and administrators stay informed about the status and activity within the Tyk AI Studio platform. diff --git a/ai-management/ai-studio/overview.mdx b/ai-management/ai-studio/overview.mdx new file mode 100644 index 000000000..ae6aa8625 --- /dev/null +++ b/ai-management/ai-studio/overview.mdx @@ -0,0 +1,113 @@ +--- +title: "AI Studio" +description: "AI Management for Platform Teams with Tyk AI Studio, a comprehensive platform for managing and deploying AI LLMs and chats" +keywords: "AI Management, Platform Teams, Tyk AI Studio" +sidebarTitle: "Overview" +--- + +import AIStudioCards from '/snippets/AIStudioCards.mdx'; +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; + +Tyk AI Studio is a comprehensive platform that enables platform teams to manage and deploy AI applications with enterprise-grade governance, security, and control. + +## Prerequisites + +Before getting started with Tyk AI Studio, you need to obtain a license from Tyk Technologies. Contact support@tyk.io or your account manager to request your AI Studio license. 
+ +## Key Features + + + +Tyk AI Studio enables platform teams to: +- Centralize access credentials to AI vendors, including commercial and in-house offerings +- Log and measure AI usage across the organization +- Build and release Chatbots for internal collaboration +- Ingest data into a knowledge corpus with siloed access settings +- Create AI functions as a service for common automations +- Script complex multi-modal assistants that intelligently select which AI vendor, model, and data set to use +- Implement role-based access control to democratize LLM access while maintaining security + +## Enterprise AI Challenges + +Organizations implementing AI face several key challenges: + +* **Shadow AI**: Unauthorized tools running without governance or oversight +* **Data privacy and compliance**: Meeting regulatory requirements while enabling innovation +* **Security and access control**: Implementing proper authentication and authorization +* **Cost management**: Controlling expenses from unmonitored AI usage + +## Integrate AI with Confidence + +Tyk AI Studio helps organizations harness AI's potential while ensuring proper governance, security, compliance, and control. Purpose-built for enterprises, this AI gateway and management solution enables seamless governance that overcomes the risks and challenges of AI adoption. 
+ + + +## Solution Components + +Tyk AI Studio provides a comprehensive suite of capabilities to manage, govern, and interact with AI across your organization: + +### Centralized AI management + +Unify and control AI usage across your organization: +- Govern AI with role-based access control, rate limiting and audit logging +- Monitor usage, costs, budgets, and performance in real time +- Manage how LLMs are accessed and used, with an AI gateway as a single point of control +- Ensure compliance with global privacy regulations through customizable data flow management + +### AI Gateway + +Seamlessly connect to AI tools and models: +- Proxy to large language models (LLMs) and integrate custom data models and tools +- Use the [AI Gateway](/ai-management/ai-studio/proxy) to enable secure, scalable access to AI services across teams +- Track usage statistics, cost breakdowns, and tool utilization to optimize resources + +### AI Portal + +Empower developers with a curated AI service catalog: +- Simplify access to AI tools and services through a unified portal +- Enable seamless integration with internal systems and external workflows +- Accelerate innovation by providing developers with the tools they need to build faster + +### AI Chat + +Bring AI-powered collaboration to every user: +- Deliver intuitive chat interfaces for direct interaction with AI tools and data sources +- Enable teams to access AI-driven insights through a unified, secure chat experience +- Foster collaboration and innovation across your organization + +## Benefits + +Tyk AI Studio empowers organizations to adopt AI securely and efficiently, delivering: + +- **Centralized governance and control:** Consistency at the core of your business for enhanced security, compliance, troubleshooting and auditing +- **Strengthened security:** Peace of mind from strict access controls, secure interactions and region-specific compliance +- **Simplified workflows:** Reduced complexity and enhanced efficiency 
supporting developers and less technical users to work with multiple LLMs and tools +- **Trusted data privacy:** Rigorous compliance with data protection standards, reducing risk of reputational, operational and financial damage +- **Seamless integration:** Enhanced workflows in customer support, development, and marketing with trusted AI tools +- **Cost optimization:** Control over expenses and accountability, enabling smarter budgets + +## Use Cases + +Proxying LLM traffic through the AI Gateway delivers control, visibility, and scalability across various scenarios: + +- **Interact with your APIs:** Connect your API management to enable API interaction for wider teams +- **Banking and financial services:** Ensure only anonymized customer data is sent to LLMs, tracking usage by department to manage costs +- **Software development:** Leverage AI for code suggestions and issue tracking in Jira +- **Data governance:** Audit and secure AI interactions to meet regulatory standards +- **Healthcare:** Route LLM traffic through an AI gateway to comply with HIPAA, protecting patient data while enabling AI-driven insights +- **E-commerce:** Integrate LLMs with product catalogs, allowing employees to query inventory or sales data through a chat interface + +## MCP servers in AI Studio + +AI Studio provides comprehensive [MCP (Model Context Protocol) capabilities](/ai-management/mcps/overview#mcp-for-enterprise-use) including: + +- **Remote MCP catalogues and server support** – Expose internal APIs and tools to AI assistants securely without requiring local installations +- **Secure local MCP server deployment** – Deploy MCP servers within controlled environments, integrated with Tyk AI Gateway for monitoring and governance +- **Ready-to-use MCP integrations** – Including API to MCP conversion, Dashboard API access, and searchable documentation access + +For more details about Model Context Protocol (MCP) integration, please visit the [Tyk MCPs 
overview](/ai-management/mcps/overview) page. + +
+ + + diff --git a/ai-management/ai-studio/proxy.mdx b/ai-management/ai-studio/proxy.mdx new file mode 100644 index 000000000..a5a1eda71 --- /dev/null +++ b/ai-management/ai-studio/proxy.mdx @@ -0,0 +1,71 @@ +--- +title: "Proxy & API Gateway" +description: "How the AI Gateway works" +keywords: "AI Studio, AI Management, AI Gateway, AI Proxy" +sidebarTitle: "AI Gateway" +--- + +The Tyk AI Studio Proxy is the central gateway for all Large Language Model (LLM) interactions within the platform. It acts as a secure, observable, and policy-driven entry point, managing requests from client applications to the configured backend LLM services. + +## Purpose + +The Proxy serves several critical functions: + +* **Unified Access Point:** Provides a single, consistent endpoint for applications to interact with various LLMs. +* **Security Enforcement:** Handles authentication, authorization, and applies security policies. +* **Policy Management:** Enforces rules related to budget limits, model access, and applies custom [Filters](/ai-management/ai-studio/filters). +* **Observability:** Logs detailed analytics data for each request, feeding the [Analytics & Monitoring](/ai-management/ai-studio/analytics) system. +* **Vendor Abstraction:** Hides the complexities of different LLM provider APIs, especially through the OpenAI-compatible endpoint. + +## Core Functions + +1. **Request Routing:** Incoming requests include a `routeId` in their path (e.g., `/proxy/{routeId}/...` or `/openai/{routeId}/...`). The Proxy uses this `routeId` to identify the target [LLM Configuration](/ai-management/ai-studio/llm-management) and route the request accordingly. + +2. **Authentication & Authorization:** + * Validates the API key provided by the client application. + * Identifies the associated Application and User. + * Checks if the Application/User group has permission to access the requested LLM Configuration based on [RBAC rules](/ai-management/ai-studio/user-management). + +3. 
**Policy Enforcement:** Before forwarding the request to the backend LLM, the Proxy enforces policies defined in the LLM Configuration or globally: + * **Budget Checks:** Verifies if the estimated cost exceeds the configured [Budgets](/ai-management/ai-studio/llm-management) for the App or LLM. + * **Model Access:** Ensures the requested model is allowed for the specific LLM configuration. + * **Filters:** Applies configured request [Filters](/ai-management/ai-studio/filters) to modify the incoming request payload. + +4. **Analytics Logging:** After receiving the response from the backend LLM (and potentially applying response Filters), the Proxy logs detailed information about the interaction (user, app, model, tokens used, cost, latency, etc.) to the [Analytics](/ai-management/ai-studio/analytics) database. + +## Endpoints + +Tyk AI Studio typically exposes two primary types of proxy endpoints: + +### 1. OpenAI-Compatible Endpoint (`/openai/{routeId}/v1/...`) + +* **Purpose:** This endpoint mimics the official OpenAI API structure. It allows developers to use standard OpenAI SDKs (Python, Node.js, etc.) to interact with *any* LLM configured in Tyk AI Studio, regardless of the actual backend vendor (Anthropic, Google Vertex AI, etc.). +* **Translation:** Tyk AI Studio includes a translation layer (using libraries like `langchaingo`) that converts standard OpenAI API requests into the format required by the target backend LLM (defined in the `{routeId}` configuration) and translates the backend LLM's response back into the standard OpenAI format. +* **Benefits:** Simplifies integration significantly, allowing developers to write code once and target multiple LLM backends managed by Tyk AI Studio. 
+ + ```python + # Example using OpenAI Python SDK + import openai + + client = openai.OpenAI( + base_url="https://your-ai-studio-host/openai/my-anthropic-config/v1", + api_key="YOUR_AI_STUDIO_APP_API_KEY" + ) + + response = client.chat.completions.create( + model="claude-3-opus-20240229", # Model allowed in 'my-anthropic-config' + messages=[{"role": "user", "content": "Hello!"}] + ) + print(response.choices[0].message.content) + ``` + +### 2. Direct Proxy Endpoint (`/proxy/{routeId}/...`) + +* **Purpose:** Provides a more direct pass-through to the backend LLM, potentially with less translation than the OpenAI-compatible endpoint. The exact request/response format expected at this endpoint might depend more heavily on the specific backend LLM vendor configured for the `{routeId}`. +* **Usage:** Might be used for accessing vendor-specific features not covered by the OpenAI API standard or in scenarios where the OpenAI translation layer is not desired. + +## Configuration & Security + +The behavior of the Proxy for a specific route is determined by the corresponding [LLM Configuration](/ai-management/ai-studio/llm-management), which includes details about the backend vendor, model access, budget limits, and associated filters. + +By centralizing LLM access through the Proxy, Tyk AI Studio provides a robust layer for security, control, and observability over AI interactions. diff --git a/ai-management/ai-studio/quickstart.mdx b/ai-management/ai-studio/quickstart.mdx new file mode 100644 index 000000000..67ec58484 --- /dev/null +++ b/ai-management/ai-studio/quickstart.mdx @@ -0,0 +1,236 @@ +--- +title: "Installation Guide - Docker/Packages" +description: "Install Tyk AI Studio using Docker Compose or native packages" +keywords: "AI Studio, AI Management, Installation" +sidebarTitle: "Docker/Packages (Recommended)" +--- + +This guide will help you install and configure Tyk AI Studio using Docker Compose or native packages. 
This is the recommended installation method for most users. + +## Prerequisites + +### License Requirement +- A valid Tyk AI Studio license from Tyk Technologies. Contact support@tyk.io or your account manager to obtain your license. + +### For Docker Compose Installation +- Docker and Docker Compose installed on your system +- PostgreSQL database (recommended for production) - if not provided, SQLite will be used as fallback + +### For Package Installation +- Linux system with systemd +- PostgreSQL database (strongly recommended) - if not configured, SQLite will be used as fallback + +## Installation Methods + +### Method 1: Docker Compose Installation + +1. Create a new directory for your project: + ```bash + mkdir tyk-ai-studio && cd tyk-ai-studio + ``` + +2. Create a `compose` directory and add the following Docker Compose file: + ```bash + mkdir compose && cd compose + ``` + +3. Create a `compose.yaml` file with the following content: + ```yaml + version: "3" + services: + ai-studio: + image: tykio/tyk-ai-studio:latest + volumes: + - ./confs/.env:/app/.env + environment: + - DATABASE_URL=postgres://postgres:postgres@postgres:5432/postgres + - DATABASE_TYPE=postgres + depends_on: + postgres: + condition: service_healthy + ports: + - 8080:8080 # Main application port + - 9090:9090 # Gateway server port + + postgres: + image: postgres:latest + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=postgres + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 5s + timeout: 5s + retries: 5 + ``` + +4. Create a configuration directory and environment file: + ```bash + mkdir -p confs + touch confs/.env + ``` + +5. 
Add your configuration to the `.env` file (example): + + **For PostgreSQL (recommended):** + ```env + ALLOW_REGISTRATIONS=true + ADMIN_EMAIL=you@tyk.io + SITE_URL=http://localhost:8080 + FROM_EMAIL=noreply@tyk.io + DATABASE_URL=postgres://postgres:postgres@postgres:5432/postgres + DATABASE_TYPE=postgres + TYK_AI_SECRET_KEY=a35b3f7b0fb4dd3a048ba4fc6e9fe0a8cb804d7884c62b6b2ea09c99612c4405 + FILTER_SIGNUP_DOMAINS=tyk.io + TYK_AI_LICENSE=XXXX + # Optional SMTP settings + # SMTP_SERVER=smtp.sendgrid.net + # SMTP_PORT=587 + # SMTP_USER=apikey + # SMTP_PASS= + ``` + + **For SQLite (development only):** + ```env + ALLOW_REGISTRATIONS=true + ADMIN_EMAIL=you@tyk.io + SITE_URL=http://localhost:8080 + FROM_EMAIL=noreply@tyk.io + DATABASE_URL=tyk-ai-studio.db + DATABASE_TYPE=sqlite + TYK_AI_SECRET_KEY=a35b3f7b0fb4dd3a048ba4fc6e9fe0a8cb804d7884c62b6b2ea09c99612c4405 + FILTER_SIGNUP_DOMAINS=tyk.io + TYK_AI_LICENSE=XXXX + ``` + + > **Note:** PostgreSQL is strongly recommended for production use. SQLite is only suitable for development and testing. + +#### Starting the Service + +1. Start the services using Docker Compose: + ```bash + docker compose up -d + ``` + +2. Verify that the services are running: + ```bash + docker compose ps + ``` + +#### Accessing the Portal + +Once the services are running: + +- Access the AI Portal interface at: `http://localhost:8080` +- Access the Gateway at: `http://localhost:9090` + +#### Monitoring Logs + +To view the logs from the services: +```bash +docker compose logs -f +``` + +#### Stopping the Service + +To stop and remove the containers: +```bash +docker compose down +``` + +### Method 2: Package Installation + +1. 
Add the Tyk package repository: + ```bash + # For Ubuntu/Debian systems + curl -fsSL https://packagecloud.io/tyk/tyk-ee/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/tyk-ee-archive-keyring.gpg + echo "deb [signed-by=/usr/share/keyrings/tyk-ee-archive-keyring.gpg] https://packagecloud.io/tyk/tyk-ee/ubuntu/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/tyk-ee.list + + # For RHEL/CentOS systems + curl -s https://packagecloud.io/install/repositories/tyk/tyk-ee/script.rpm.sh | sudo bash + ``` + +2. Install the package: + ```bash + # For Ubuntu/Debian + sudo apt update + sudo apt install tyk-ai-studio + + # For RHEL/CentOS + sudo yum install tyk-ai-studio + ``` + +3. Configure the application: + ```bash + sudo nano /etc/tyk-ai-studio/.env + ``` + + Add your configuration (similar to Docker Compose example above). Ensure you configure PostgreSQL for production: + ```env + DATABASE_URL=postgres://username:password@localhost:5432/tyk_ai_studio + DATABASE_TYPE=postgres + TYK_AI_LICENSE=your-license-key-here + # ... other configuration options + ``` + + > **Note:** The `TYK_AI_LICENSE` environment variable is required for the service to start. Contact support@tyk.io or your account manager if you need to obtain a license. + +4. Start the service: + ```bash + sudo systemctl enable tyk-ai-studio + sudo systemctl start tyk-ai-studio + ``` + +5. Check service status: + ```bash + sudo systemctl status tyk-ai-studio + ``` + +## Service Components + +The Docker Compose setup includes: + +- **Tyk AI Studio Service**: The main AI Portal application + - Runs on ports 8080 (web interface) and 9090 (gateway server) + - Connects to PostgreSQL for data storage + - Uses environment variables for configuration + +- **PostgreSQL Database**: + - Stores application data + - Uses default credentials (configurable via environment variables) + +## First User Registration + +After starting the service, you need to create your first admin user: + +1. 
**Access the application**: Open your browser and navigate to `http://localhost:8080` +2. **Register with admin email**: Use the EXACT email address you set in the `ADMIN_EMAIL` environment variable +3. **Complete registration**: The first user who registers with the admin email will automatically become the administrator + + + + + **Important**: The first user registration must use the same email address specified in the `ADMIN_EMAIL` environment variable. This user will have full administrative privileges. + + + +## Next Steps + +Once you've completed the installation and registered your first user: + +1. **Configure your first LLM**: Add connections to AI providers like OpenAI, Anthropic, or Azure OpenAI +2. **Set up user management**: Create additional users and configure permissions +3. **Explore the AI Portal**: Try the chat interface and explore available tools + +Continue to the [First Steps guide](/ai-management/ai-studio/configuration) for detailed configuration instructions. + +## Troubleshooting + +If you encounter issues: + +1. Check that all required ports (8080, 9090) are available +2. Ensure your `.env` file contains valid API keys and the correct `ADMIN_EMAIL` +3. Verify that Docker and Docker Compose are properly installed +4. Check the logs for any error messages: `docker compose logs -f` +5. **Registration issues**: Make sure you're using the exact email address from `ADMIN_EMAIL` \ No newline at end of file diff --git a/ai-management/ai-studio/secrets.mdx b/ai-management/ai-studio/secrets.mdx new file mode 100644 index 000000000..e797007e9 --- /dev/null +++ b/ai-management/ai-studio/secrets.mdx @@ -0,0 +1,67 @@ +--- +title: "Secrets Management" +description: "How to configure secret management in Tyk AI Studio?" 
+keywords: "AI Studio, AI Management, Secret Management" +sidebarTitle: "Secrets Management" +--- + +Tyk AI Studio provides a secure mechanism for storing and managing sensitive information, such as API keys, passwords, authentication tokens, or other credentials required by various platform configurations. + +## Purpose + +The Secrets Management system aims to: + +* **Prevent Exposure:** Avoid hardcoding sensitive values directly in configuration files or UI fields. +* **Centralize Management:** Provide a single place to manage and update credentials. +* **Enhance Security:** Store sensitive data encrypted at rest. + +## Core Concepts + +* **Secret:** A named key-value pair where the 'key' is the **Variable Name** used for reference, and the 'value' is the actual sensitive data (e.g., an API key string). + +* **Encryption:** + * Secret values are **encrypted at rest** within Tyk AI Studio's storage. + * The encryption and decryption process relies on a key derived from the **`TYK_AI_SECRET_KEY` environment variable** set when running the Tyk AI Studio instance. + * **CRITICAL:** The `TYK_AI_SECRET_KEY` must be kept confidential and managed securely. Loss of this key will render existing secrets unusable. Changing the key will require re-entering all secrets. + +* **Reference Syntax:** Secrets are referenced within configuration fields (like API key fields in LLM or Tool setups) using a specific syntax: + ``` + $SECRET/VariableName + ``` + Replace `VariableName` with the exact name given to the secret when it was created (e.g., `$SECRET/OPENAI_API_KEY`, `$SECRET/JIRA_AUTH_TOKEN`). + +* **Runtime Resolution:** When a configuration uses a field containing a secret reference (e.g., `$SECRET/MY_KEY`): + 1. The configuration itself stores the `$SECRET/MY_KEY` string, *not* the actual secret value. + 2. 
Only when the system needs the actual value at runtime (e.g., the [Proxy](/ai-management/ai-studio/proxy) preparing a request for an LLM, or a [Tool](/ai-management/ai-studio/tools) calling its external API), Tyk AI Studio retrieves the encrypted secret value, decrypts it using the `TYK_AI_SECRET_KEY`, and injects the plain text value into the operation. + 3. The decrypted value is typically used immediately and not persisted further. + +## Creating & Managing Secrets (Admin) + +Administrators manage secrets via the Tyk AI Studio UI or API: + +1. Navigate to the Secrets management section. +2. Create a new secret by providing: + * **Variable Name:** A unique identifier (letters, numbers, underscores) used in the `$SECRET/VariableName` reference. + * **Secret Value:** The actual sensitive string (e.g., `sk-abc123xyz...`). +3. Save the secret. It is immediately encrypted and stored. + + Secrets UI + +Secrets can be updated or deleted as needed. Updating a secret value will automatically apply the new value wherever the `$SECRET/VariableName` reference is used, without needing to modify the configurations themselves. + +## Usage Examples + +Secrets are commonly used in: + +* **[LLM Configurations](/ai-management/ai-studio/llm-management):** Storing API keys for providers like OpenAI, Anthropic, Google Vertex AI, etc. + * *Example:* In the API Key field for an OpenAI configuration: `$SECRET/OPENAI_API_KEY` +* **[Tool Configurations](/ai-management/ai-studio/tools):** Storing API keys, authentication tokens (Bearer, Basic Auth), or other credentials needed to interact with the external API the tool represents. + * *Example:* In a field for a custom header for a JIRA tool: `Authorization: Basic $SECRET/JIRA_BASIC_AUTH_TOKEN` +* **[Data Source Configurations](/ai-management/ai-studio/datasources-rag):** Storing API keys or connection credentials for vector databases (e.g., Pinecone, Milvus) or embedding service providers. 
+ * *Example:* In the API Key field for a Pinecone vector store: `$SECRET/PINECONE_API_KEY` + +## Security Considerations + +* **Protect `TYK_AI_SECRET_KEY`:** This is the master key for secrets. Treat it with the same level of security as database passwords or root credentials. Use environment variable management best practices. +* **Principle of Least Privilege:** Grant administrative access (which includes secrets management) only to trusted users. +* **Regular Rotation:** Consider policies for regularly rotating sensitive credentials by updating the Secret Value in Tyk AI Studio. diff --git a/ai-management/ai-studio/sso.mdx b/ai-management/ai-studio/sso.mdx new file mode 100644 index 000000000..ba2576127 --- /dev/null +++ b/ai-management/ai-studio/sso.mdx @@ -0,0 +1,71 @@ +--- +title: "SSO Integration" +description: "How to configure SSO in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Single Sign On" +sidebarTitle: "SSO Integration" +--- + +Tyk AI Studio supports Single Sign-On (SSO) integration, allowing users to authenticate using their existing credentials from external Identity Providers (IdPs). This simplifies login, enhances security, and centralizes user management. + +## Purpose + +The SSO integration aims to: + +* Allow users to log in to Tyk AI Studio using their familiar corporate or social identity credentials. +* Eliminate the need for separate Tyk AI Studio-specific passwords. +* Improve security by leveraging the organization's existing IdP infrastructure and policies (e.g., MFA). +* Streamline user provisioning and de-provisioning (depending on IdP capabilities and configuration). + + + + +## Technology: Tyk Identity Broker (TIB) + +Tyk AI Studio leverages the embedded **Tyk Identity Broker (TIB)** component to handle SSO integrations. TIB acts as a bridge between Tyk AI Studio (the Service Provider or SP) and various external Identity Providers (IdPs). 
+ +## Supported Protocols & Providers + +TIB enables Tyk AI Studio to integrate with IdPs supporting standard protocols, including: + +* **OpenID Connect (OIDC):** Commonly used by providers like Google, Microsoft Entra ID (Azure AD), Okta, Auth0. +* **SAML 2.0:** Widely used in enterprise environments (e.g., Okta, Ping Identity, ADFS). +* **LDAP:** For integration with traditional directory services like Active Directory. +* **Social Logins:** Providers like GitHub, GitLab, etc. (often via OIDC). + +## Configuration (Admin) + +Administrators configure SSO providers within the Tyk AI Studio administration interface (likely via TIB's configuration settings exposed through Tyk AI Studio): + +1. **Select Protocol:** Choose the appropriate protocol (OIDC, SAML, etc.). +2. **Provider Details:** Enter the specific configuration details required by the chosen protocol and IdP. + * **OIDC Example:** Client ID, Client Secret, Issuer URL, Discovery Endpoint. + * **SAML Example:** IdP SSO URL, IdP Issuer/Entity ID, IdP Public Certificate, SP Entity ID (Tyk AI Studio's identifier). +3. **Profile Mapping:** Configure how attributes received from the IdP (e.g., email, name, group memberships) map to Tyk AI Studio user profiles. + * Identify which IdP attribute contains the unique user identifier (e.g., `email`, `sub`, `preferred_username`). + * Map IdP attributes to Tyk AI Studio user fields (e.g., `given_name` -> First Name, `family_name` -> Last Name). +4. **Group Mapping (Optional but Recommended):** Configure rules to automatically assign users to Tyk AI Studio [Groups](/ai-management/ai-studio/user-management) based on group information received from the IdP. + * *Example:* If the IdP sends a `groups` claim containing "Tyk AI Studio Admins", map this to automatically add the user to the "Administrators" group in Tyk AI Studio. +5. **Enable Provider:** Activate the configured IdP for user login. + + SSO Config UI + +## Login Flow + +When SSO is enabled: + +1. 
User navigates to the Tyk AI Studio login page. +2. User clicks a button like "Login with [Your IdP Name]" (e.g., "Login with Google", "Login with Okta"). +3. User is redirected to the external IdP's login page. +4. User authenticates with the IdP (using their corporate password, MFA, etc.). +5. Upon successful authentication, the IdP redirects the user back to Tyk AI Studio (via TIB) with an authentication assertion (e.g., OIDC ID token, SAML response). +6. TIB validates the assertion and extracts user profile information. +7. Tyk AI Studio finds an existing user matching the unique identifier or provisions a new user account based on the received profile information (Just-In-Time Provisioning). +8. Group memberships may be updated based on configured mapping rules. +9. The user is logged into Tyk AI Studio. + +## Benefits + +* **Improved User Experience:** One less password to remember. +* **Enhanced Security:** Leverages established IdP security policies. +* **Centralized Control:** User access can often be managed centrally via the IdP. +* **Simplified Onboarding/Offboarding:** User access to Tyk AI Studio can be tied to their status in the central IdP. diff --git a/ai-management/ai-studio/teams.mdx b/ai-management/ai-studio/teams.mdx new file mode 100644 index 000000000..67c50c541 --- /dev/null +++ b/ai-management/ai-studio/teams.mdx @@ -0,0 +1,125 @@ +--- +title: "Teams View for Tyk AI Studio" +description: "Overview of teams in AI Studio?" +keywords: "AI Studio, AI Management, Teams" +--- + +# Teams View for Tyk AI Studio + +The Teams View allows administrators to manage role-based access control by organizing users into teams. Teams define permissions and access levels across the portal, enabling streamlined user management. + +--- + +#### **Table Overview** +The teams are displayed in a tabular format with the following columns: + +1. **ID**: + A unique identifier assigned to each team for easy reference. + +2. 
**Name**: + The name of the team, describing its role or purpose (e.g., "Solutions Architects," "Customer Support"). + +3. **Actions**: + A menu (represented by three dots) allowing administrators to perform additional actions on a team, such as editing its details, managing permissions, or deleting it. + +--- + +#### **Features** +1. **Add Team Button**: + Located in the top-right corner of the view, this green button allows administrators to create a new team. Clicking the button opens a form to configure the team's name, permissions, and members. + +2. **Pagination Dropdown**: + Found at the bottom-left corner of the table, this dropdown allows administrators to select how many teams are displayed per page (e.g., 10, 20, or more teams). + +--- + +#### **Role-Based Access Control** +Each team represents a set of users with shared permissions. Teams help to: +- Grant or restrict access to specific features or sections of the portal. +- Streamline permission management by assigning roles at the team level instead of individually for each user. +- Enhance security by ensuring users only have access to the resources they need. + +--- + +The Teams View is a critical tool for managing access control efficiently, ensuring that users have the appropriate permissions based on their roles within the organization. + +### Teams Quick Actions in Tyk AI Studio + +The Teams View includes a set of quick actions accessible via the **Actions** menu (three-dot icon) for each team. These actions allow administrators to make modifications on the fly without navigating to separate pages. + +--- + +#### **Quick Actions Overview** + +1. **Add Catalogue to Team**: + Associates a general catalogue of resources with the team. Catalogues are bundles of LLMs, tools, and data sources that the team can access. + +2. **Add Data Catalogue to Team**: + Specifically links a data catalogue to the team. This grants access to specific datasets and data resources. + +3. 
**Add Tool Catalogue to Team**: + Assigns a tool catalogue to the team, enabling access to specific tools and utilities defined for the team. + +4. **Add User to Team**: + Opens an interface to add a new user to the selected team. This action facilitates user-role assignment directly from the Teams View. + +5. **Edit Team**: + Redirects to an editing interface where the team's name, description, and permissions can be modified. + +6. **Delete Team**: + Permanently removes the team from the portal. This action may require confirmation to prevent accidental deletions. + +--- + +#### **Efficiency in Team Management** +These quick actions streamline team management by allowing administrators to update access, assign resources, or modify user roles without navigating away from the Teams View. This improves workflow efficiency, especially in environments with frequent updates or large user bases. + +### Team Details View for Tyk AI Studio + +The **Team Details View** allows administrators to review and modify the access credentials and object ownership of a specific team. This includes managing users and assigning catalogues for various resources. Below is an explanation of the elements and actions available: + +--- + +#### **Team Information** +- **Name**: + Displays the name of the selected team (e.g., "Solutions Architects"). This provides context for the resources and users associated with the team. + +--- + +#### **Users in Team** +- **List of Users**: + Displays the names of all users currently assigned to the team (e.g., Martin, Leonid, Ahmet). + - Each user entry includes a **delete icon** (trash bin) for removing the user from the team. + +- **Add User Button**: + A green button labeled **+ ADD USER** that allows administrators to add a new user to the team. Clicking this opens a user selection interface. + +--- + +#### **Catalogues in Team** +Catalogues grant access to resources, tools, or data collections that are assigned to the team. + +1. 
**Catalogues in Team**: + - Displays a list of general catalogues assigned to the team. + - Includes a **+ ADD CATALOGUE** button to add new catalogues. + +2. **Data Catalogues in Team**: + - Lists all data catalogues assigned to the team. + - Includes a **+ ADD DATA CATALOGUE** button for adding new data catalogues. + +3. **Tool Catalogues in Team**: + - Lists all tool catalogues associated with the team (e.g., "Solution Architects"). + - Includes a **+ ADD TOOL CATALOGUE** button to add additional tool catalogues. + +- **Delete Icons**: + Each catalogue entry includes a delete icon (trash bin) for removing the catalogue from the team. + +--- + +#### **Navigation** +- **Back to Teams**: + A link in the top-right corner that returns the administrator to the Teams List View without saving any changes made in the current view. + +--- + +This Team Details View provides a centralized interface for managing team resources and user roles. It ensures that administrators can efficiently update permissions and access rights, maintaining the organization's security and productivity standards. diff --git a/ai-management/ai-studio/tools.mdx b/ai-management/ai-studio/tools.mdx new file mode 100644 index 000000000..18d534583 --- /dev/null +++ b/ai-management/ai-studio/tools.mdx @@ -0,0 +1,109 @@ +--- +title: "Interact with Tools" +description: "How to integrate tools in Tyk AI Studio?" +keywords: "AI Studio, AI Management, Tools" +sidebarTitle: "Tools & Extensibility" +--- + +Tyk AI Studio's Tool System allows Large Language Models (LLMs) to interact with external APIs and services, dramatically extending their capabilities beyond simple text generation. This enables LLMs to perform actions, retrieve real-time data, and integrate with other systems. + +## Purpose + +Tools bridge the gap between conversational AI and external functionalities. 
By defining tools, you allow LLMs interacting via the [Chat Interface](/ai-management/ai-studio/chat-interface) or API to: + +* Access real-time information (e.g., weather, stock prices, database records). +* Interact with other software (e.g., search JIRA tickets, update CRM records, trigger webhooks). +* Perform complex calculations or data manipulations using specialized services. + +## Core Concepts + +* **Tool Definition:** A Tool in Tyk AI Studio is essentially a wrapper around an external API. Its structure and available operations are defined using an **OpenAPI Specification (OAS)** (v3.x, JSON or YAML). +* **Allowed Operations:** From the provided OAS, administrators select the specific `operationIds` that the LLM is permitted to invoke. This provides granular control over which parts of an API are exposed. +* **Authentication:** Tools often require authentication to access the target API. Tyk AI Studio handles this securely by integrating with [Secrets Management](/ai-management/ai-studio/secrets). You configure the authentication method (e.g., Bearer Token, Basic Auth) defined in the OAS and reference a stored Secret containing the actual credentials. +* **Privacy Levels:** Each Tool is assigned a privacy level. This level is compared against the privacy level of the [LLM Configuration](/ai-management/ai-studio/llm-management) being used. A Tool can only be used if its privacy level is less than or equal to the LLM's level, preventing sensitive tools from being used with potentially less secure or external LLMs. + + Privacy levels define how data is protected by controlling LLM access based on its sensitivity: + - Public – Safe to share (e.g., blogs, press releases). + - Internal – Company-only info (e.g., reports, policies). + - Confidential – Sensitive business data (e.g., financials, strategies). + - Restricted (PII) – Personal data (e.g., names, emails, customer info). 
+* **Tool Catalogues:** Tools are grouped into logical collections called Catalogues. This simplifies management and access control. +* **Filters:** Optional [Filters](/ai-management/ai-studio/filters) can be applied to tool interactions to pre-process requests sent to the tool or post-process responses received from it (e.g., for data sanitization). +* **Documentation:** Administrators can provide additional natural language documentation or instructions specifically for the LLM, guiding it on how and when to use the tool effectively. +* **Dependencies:** Tools can declare dependencies on other tools, although the exact usage pattern might vary. + +## How it Works + +When a user interacts with an LLM via the [Chat Interface](/ai-management/ai-studio/chat-interface): + +1. The LLM receives the user prompt and the definitions of available tools (based on user group permissions and Chat Experience configuration). +2. If the LLM determines that using one or more tools is necessary to answer the prompt, it generates a request to invoke the specific tool operation(s) with the required parameters. +3. Tyk AI Studio intercepts this request. +4. It validates the request, checks permissions, and retrieves necessary secrets for authentication. +5. Tyk AI Studio applies any configured request Filters. +6. It calls the external API defined by the Tool. +7. It receives the response from the external API. +8. Tyk AI Studio applies any configured response Filters. +9. It sends the tool's response back to the LLM. +10. The LLM uses the tool's response to formulate its final answer to the user. + +## Creating & Managing Tools (Admin) + +Administrators define and manage Tools via the UI or API: + +1. **Define Tool:** Provide a name, description, and privacy level. +2. **Upload OpenAPI Spec:** Provide the OAS document (JSON/YAML). +3. **Select Operations:** Choose the specific `operationIds` the LLM can use. +4. 
**Configure Authentication:** Select the OAS security scheme and link to a stored [Secret](/ai-management/ai-studio/secrets) for credentials. +5. **Add Documentation:** Provide natural language instructions for the LLM. +6. **Assign Filters (Optional):** Add request/response filters. + + Tool Config + +## Organizing & Assigning Tools (Admin) + +* **Create Catalogues:** Group related tools into Tool Catalogues (e.g., "CRM Tools", "Search Tools"). +* **Assign to Groups:** Assign Tool Catalogues to specific Teams. This grants users in those groups *potential* access to the tools within the catalogue. + + Catalogue Config + +## Using Tools (User) + +Tools become available to end-users through multiple access methods: + +### Chat Interface Access + +Tools become available within the [Chat Interface](/ai-management/ai-studio/chat-interface) if: + +1. The specific Chat Experience configuration includes the relevant Tool Catalogue. +2. The user belongs to a Team that has been assigned that Tool Catalogue. +3. The Tool's privacy level is compatible with the LLM being used in the Chat Experience. + +The LLM will then automatically decide when to use these available tools based on the conversation. + +### AI Portal Tool Catalogue + +The [AI Portal](/ai-management/ai-studio/ai-portal) provides a dedicated **Tool Catalogue** where users can: + +* **Browse Available Tools:** View all tools they have access to, similar to how they can browse LLMs and Data Sources. +* **Subscribe to Tools:** Add tools to their applications as part of the app creation process. +* **Access Documentation:** View tool-specific documentation and usage guidelines. +* **Manage Subscriptions:** Control which tools are included in their various applications. + +### API Access + +Developers can access tools programmatically through the Tyk AI Studio APIs: + +* **Tool Discovery API:** Retrieve lists of available tools and their specifications. 
+* **Tool Invocation API:** Execute tool operations directly via REST endpoints. +* **Application Integration:** Include tools in [Apps](/ai-management/ai-studio/apps) for streamlined API access. + +### MCP (Model Context Protocol) Access + +Tools can also be accessed through the **Model Context Protocol (MCP)**, providing: + +* **Standardized Interface:** Use MCP-compatible clients to interact with tools in a vendor-neutral way. +* **Enhanced Integration:** Connect tools to MCP-enabled applications and AI frameworks. +* **Protocol Compliance:** Leverage the growing ecosystem of MCP-compatible tools and clients. + +This multi-access approach ensures that tools can be utilized across different interfaces and integration patterns, from simple chat interactions to complex programmatic integrations. diff --git a/ai-management/ai-studio/user-management.mdx b/ai-management/ai-studio/user-management.mdx new file mode 100644 index 000000000..e2ae13347 --- /dev/null +++ b/ai-management/ai-studio/user-management.mdx @@ -0,0 +1,83 @@ +--- +title: "User Management & RBAC" +description: "How to configure user management in Tyk AI Studio?" +keywords: "AI Studio, AI Management, User Management, RBAC" +sidebarTitle: "User Management & RBAC" +--- + +Tyk AI Studio includes a comprehensive system for managing users, their authentication methods, and controlling their access to platform resources using Teams and Role-Based Access Control (RBAC). + +## Purpose + +The User Management & RBAC system provides administrators with the tools to: + +* Manage the lifecycle of user accounts. +* Define how users authenticate (UI sessions, API keys). +* Organize users into logical teams. +* Grant fine-grained access to Tyk AI Studio resources (LLMs, Tools, Data Sources, Chat Experiences) based on team membership. +* Assign platform-level permissions using roles. + +## Core Concepts + +* **User:** Represents an individual interacting with Tyk AI Studio. 
Users are typically identified by an email address or username and can be created manually by administrators, via invitation, self-registration (if enabled), or provisioned through [SSO Integration](/ai-management/ai-studio/sso). +* **Authentication:** The process of verifying a user's identity. + * **Session-based:** For users logging into the Tyk AI Studio UI (using username/password or SSO). + * **API Key:** For applications or scripts interacting with Tyk AI Studio APIs (like the [Proxy](/ai-management/ai-studio/proxy)). +* **API Key:** A unique, long-lived token generated by a user. Applications use this key (typically in an `Authorization: Bearer ` header) to authenticate requests on behalf of the user who generated it. +* **Team:** A collection of users. Teams are the primary mechanism for assigning access rights to resources. A user can belong to multiple teams. +* **Resource:** Any entity within Tyk AI Studio whose access needs to be controlled. This includes: + * [LLM Configurations](/ai-management/ai-studio/llm-management) + * Tool Catalogues (collections of [Tools](/ai-management/ai-studio/tools)) + * Data Source Catalogues (collections of [Data Sources](/ai-management/ai-studio/datasources-rag)) + * Chat Experiences (configurations for the [Chat Interface](/ai-management/ai-studio/chat-interface)) +* **Role:** Defines a set of broad, platform-level permissions. Common roles include: + * **Admin:** Full access to configure and manage the Tyk AI Studio platform. + * **Standard User:** Access to use assigned resources (e.g., chat, query LLMs) but limited or no administrative capabilities. +* **RBAC (Role-Based Access Control):** Tyk AI Studio's access control model. Access is granted primarily by assigning resource access to **Teams**, and then adding **Users** to those Teams. **Roles** provide overarching platform permissions. +* **User Entitlements:** The complete set of permissions a specific user has at any given time. 
This is calculated based on their assigned Role and the combined permissions granted through all the Teams they belong to. Systems like the Proxy check these entitlements before allowing an action. + +## User Lifecycle Management (Admin) + +Administrators manage users via the UI or API: + +* **Creation:** Create user accounts manually, send invitations, or manage users provisioned via SSO. +* **Team Assignment:** Add or remove users from various Teams. +* **Role Assignment:** Assign a primary Role (e.g., Admin, Standard) to each user. +* **Status Management:** Activate or deactivate user accounts. +* **API Key Management:** Admins may have visibility into user API keys (though users typically generate their own). + + User Management UI + +## Team Management (Admin) + +Teams are central to managing permissions: + +* **Creation/Deletion:** Create and manage teams (e.g., "Developers", "Sales Team", "Product Docs Users"). +* **User Assignment:** Add/remove users from teams. +* **Resource Assignment:** Grant access to specific LLM Configurations, Tool Catalogues, or Data Source Catalogues *to the team*. Any user in that team inherits this access. + + Group Management UI + +## Authentication Methods + +* **UI Login:** Users access the web interface by logging in with their credentials (username/password) or via a configured [SSO Provider](/ai-management/ai-studio/sso). This establishes a browser session. +* **API Key Authentication:** + 1. A user generates an API Key via their profile settings in the UI. + 2. The user securely provides this key to their application or script. + 3. The application includes the key in the `Authorization` header for requests to Tyk AI Studio APIs: + ``` + Authorization: Bearer <your-api-key> + ``` + 4. Tyk AI Studio validates the key and associates the request with the user who generated it. + +## Access Control Flow Example (API Request) + +When an application makes a request to the [Proxy](/ai-management/ai-studio/proxy) using an API Key: + +1. 
**Key Validation:** Tyk AI Studio validates the API Key. +2. **User Identification:** The system identifies the User associated with the key. +3. **Team Membership:** The system determines all Teams the User belongs to. +4. **Resource Check:** The request targets a specific resource (e.g., an LLM Configuration via its `routeId`). +5. **Permission Verification:** Tyk AI Studio checks if *any* of the user's Teams have been granted access to the requested resource. +6. **Entitlement Check:** Additional checks based on the user's Role and specific entitlements might occur (e.g., budget checks, model restrictions). +7. **Access Granted/Denied:** If all checks pass, the request proceeds; otherwise, it's denied (e.g., 401 Unauthorized or 403 Forbidden). diff --git a/ai-management/ai-studio/users.mdx b/ai-management/ai-studio/users.mdx new file mode 100644 index 000000000..e0e6750d9 --- /dev/null +++ b/ai-management/ai-studio/users.mdx @@ -0,0 +1,92 @@ +--- +title: "Users List View for Tyk AI Studio" +description: "How to configure Users in AI Studio?" +keywords: "AI Studio, AI Management, Users" +--- + +The Users List View provides administrators with an overview of all registered users on the platform. This interface allows for managing users' access and permissions. Below is a detailed explanation of its components: + +--- + +#### **Table Overview** +The table contains user-specific data displayed in rows and organized into the following columns: + +- **ID**: + A unique identifier assigned to each user for easy referencing. + +- **Name**: + The full name of the user as registered in the system. + +- **Email**: + The user's email address, which serves as their primary contact and login credential. + +- **Is Admin**: + Indicates whether the user has administrative privileges. + - **Yes**: The user has admin access and can perform higher-level management tasks. + - **No**: The user does not have admin privileges and is limited to standard user capabilities. 
+ +- **Actions**: + A menu (represented by three dots) that allows administrators to perform additional actions for each user, such as editing user details, managing permissions, or removing a user from the system. + +--- + +#### **Features** +- **Add User Button**: + Located in the top-right corner of the view, this green button allows administrators to add a new user to the system. Upon clicking, a form or modal is expected to open for entering the new user's details. + +- **Pagination Dropdown**: + Found at the bottom-left corner of the table, this dropdown allows administrators to select how many users are displayed per page (e.g., 10, 20, or more users). + +--- + +### Add or Edit Users + +The User Form allows administrators to create new user accounts on the platform. Below is a detailed explanation of the fields and options available in this form: + +--- + +#### **Form Fields** +1. **Name** *(Required)*: + A text field where the administrator enters the full name of the new user. + +2. **Email** *(Required)*: + A text field for the user's email address, which serves as their primary login identifier. + +3. **Password** *(Required)*: + A password field where the administrator sets an initial password for the user. + +--- + +#### **User Role and Permissions** +The form provides toggles to configure the user's role and access permissions: + +1. **Admin User**: + A toggle switch to grant the user administrative privileges. + - **Enabled**: The user becomes an admin with elevated permissions. + - **Disabled**: The user remains a standard user. + +2. **Show Portal**: + A toggle switch to grant or restrict access to the portal interface. + - **Enabled**: The user can access portal features. + - **Disabled**: Portal access is restricted. + +3. **Show Chat**: + A toggle switch to enable or disable the user's access to chat functionality. + - **Enabled**: The user can utilize chat features. + - **Disabled**: Chat features are hidden from the user. 
+ +--- + +#### **Action Button** +- **Add User**: + A button located at the bottom of the form that finalizes the creation of the user account. This button becomes active only after all required fields are completed. + +--- + +#### **Navigation** +- **Back to Users**: + A link in the top-right corner that navigates the administrator back to the Users List View without saving changes. + +--- + +This form provides a streamlined way for administrators to add and configure new user accounts, ensuring flexibility in assigning roles and managing access permissions. diff --git a/ai-management/mcps/api-to-mcp.mdx b/ai-management/mcps/api-to-mcp.mdx new file mode 100644 index 000000000..c9f311ed2 --- /dev/null +++ b/ai-management/mcps/api-to-mcp.mdx @@ -0,0 +1,238 @@ +--- +title: "Natural-language interaction with your APIs (API to MCP)" +description: "Enable AI assistants to safely and dynamically interact with your existing APIs using Tyk's API to MCP tooling." +keywords: "AI MCP, API-to-MCP, Tyk AI MCP" +sidebarTitle: "API to MCP" +--- + +## Overview + +**API to MCP** enables AI assistants to safely and dynamically interact with your existing APIs. It allows non-technical users to access API functionality through natural language, while developers retain full control over what endpoints are exposed and how they are accessed. + +This allows AI tools to interpret, invoke, and structure API operations without requiring any backend modifications. + +**Use this tool to:** +- Expose your APIs for AI interaction +- Allow AI assistants to understand and call API operations +- Configure basic access controls (e.g., filtering operations and setting headers) to manage how AI tools interact with your APIs + +If you're looking for quick setup, [jump to Quick Start](#quick-start). For deeper understanding, see [How It Works](#how-it-works) and [Use Cases](#use-cases). 
+ +```mermaid +graph LR + A["Your OpenAPI"] --> B["Tyk API-to-MCP Tool"] + B --> C["Tyk MCP Server"] + D["AI Assistant"] <--> C + C <--> E["Your API"] + + style A fill:#ffffff,stroke:#D1D1E0,stroke-width:1px,font-size:18px + style B fill:#d5f5e3,stroke:#D1D1E0,stroke-width:1px,font-size:18px + style C fill:#d5f5e3,stroke:#D1D1E0,stroke-width:1px,font-size:18px + style D fill:#eeeeee,stroke:#D1D1E0,stroke-width:1px,font-size:18px + style E fill:#ffffff,stroke:#D1D1E0,stroke-width:1px,font-size:18px + + linkStyle default stroke-width:2px +``` + +## Key Features +- **Dynamic OpenAPI Loading:** Load specifications from local files or HTTP/HTTPS URLs +- **OpenAPI Overlay Support:** Apply overlays to customize specifications +- **Flexible Operation Filtering:** Include/exclude specific operations using glob patterns +- **Comprehensive Parameter Handling:** Preserves formats and includes metadata +- **Built-in Access Control & Security:** Control which endpoints are exposed, enforce authentication (API keys, OAuth, etc.), and add custom headers to all API requests for secure AI access +- **Authentication Support:** Handles API keys, OAuth tokens, and other security schemes +- **MCP Extensions:** Support for custom x-mcp extensions to override tool names and descriptions +- **Multiple Integration Options:** Works with Claude Desktop, Cursor, Vercel AI SDK, and other MCP-compatible environments + +The [complete features list](https://github.com/TykTechnologies/api-to-mcp#features) is available in Tyk's *api-to-mcp* GitHub repository. + +## Quick Start + + +The quickest way to get started is to configure your AI assistant to run it directly as an MCP tool. + +### Requirements +- [Node.js v18+](https://nodejs.org/en/download) installed +- An accessible OpenAPI specification, e.g. 
`https://petstore3.swagger.io/api/v3/openapi.json` (could be a local file as well) +- Claude Desktop (which we show in this example) or other MCP-compatible AI assistant that supports connecting to external MCP-compatible tool servers (e.g. Cursor, Vercel AI SDK, Cline extension in VS Code, etc.) + +### Configure your AI Assistant + +To connect the tool with Claude Desktop or other MCP-compatible assistants, you need to register it as an MCP server. Most AI assistants share a similar MCP server definition. This is the definition for *api-to-mcp* with petstore as the OpenAPI: + +```json +{ + "mcpServers": { + "api-tools": { + "command": "npx", + "args": [ + "-y", + "@tyk-technologies/api-to-mcp@latest", + "--spec", + "https://petstore3.swagger.io/api/v3/openapi.json" + ], + "enabled": true + } + } +} +``` + +**Step 1.** +To enable the tool, paste the above configuration into your AI assistant's MCP config file: + +- **Claude Desktop**: For MacOS, you need to update `~/Library/Application Support/Claude/claude_desktop_config.json`. See the [Claude Desktop setup instructions](https://github.com/TykTechnologies/api-to-mcp?tab=readme-ov-file#setting-up-in-claude-desktop) for Windows OS and more customization options. +- **Cursor**: See the [Cursor setup guide](https://github.com/TykTechnologies/api-to-mcp#cursor) for instructions on setting it up with Cursor. + +**Step 2.** +Once connected, ask the AI to perform an operation (e.g., "List all pets" or "Create a new user"). + +## How It Works + +### User flow in API to MCP + +1. **Input**: Your OpenAPI specification (required) and optional overlays +2. **Processing**: The API to MCP tool loads the spec, applies any overlays, and transforms API operations into MCP tools +3. **Runtime**: The MCP server exposes these tools, making them discoverable to AI assistants +4. 
**Execution Flow**: When you ask the AI assistant a question, it calls the tool (via the MCP server), which translates the request, forwards it to your API, and returns a formatted response. + + +```mermaid +flowchart LR + subgraph "Input" + A["OpenAPI Specification"] + B["Optional Overlays"] + end + + subgraph "API to MCP Tool" + C["1. Load & Parse
OpenAPI Spec"] + D["2. Apply Overlays
(if provided)"] + E["3. Transform API Operations
into MCP Tools"] + F["MCP Server"] + end + + subgraph "Runtime" + G["4. AI Assistant
Discovers Tools"] + H["AI Assistant
Calls Tools"] + I["Your API"] + end + + A -->|YAML/JSON| C + B -->|Optional| D + C --> D + D --> E + E -->|Register Tools| F + F -->|Expose Tools| G + G --> H + H -->|Request| F + F -->|Translate & Forward| I + I -->|Response| F + F -->|Format & Return| H + + classDef input fill:#f9f,stroke:#333,stroke-width:2px,font-size:18px; + classDef tool fill:#bbf,stroke:#333,stroke-width:2px,font-size:18px; + classDef runtime fill:#bfb,stroke:#333,stroke-width:2px,font-size:18px; + + class A,B input; + class C,D,E,F tool; + class G,H,I runtime; + +linkStyle default stroke-width:2px +``` + +### Request lifecycle: how an AI assistant invokes an API tool + +The following diagram illustrates the flow of a request through the system at runtime: + +```mermaid +sequenceDiagram + participant AI as "AI Client" + participant MCP as "MCP Server" + participant Tool as "Tool Handler" + participant API as "API Client" + participant Target as "Target API" + + AI->>MCP: Invoke Tool + MCP->>Tool: Execute Tool Handler + Tool->>Tool: Extract Parameters + Tool->>Tool: Validate Input + Tool->>API: executeApiCall() + API->>API: Apply Security + API->>API: Construct Request + API->>Target: Make HTTP Request + Target-->>API: HTTP Response + + alt Successful Response + API-->>Tool: Success Response + Tool->>MCP: Format MCP Result + MCP-->>AI: Tool Execution Success + else Error Response + API-->>Tool: Error Response + Tool->>MCP: Map to MCP Error + MCP-->>AI: Tool Execution Error + end + +``` + +API to MCP can be found in [api-to-mcp GitHub repository](https://github.com/TykTechnologies/api-to-mcp) + +## Use cases + + +Use **API to MCP** when you need to: + +- **Connect AI Assistants to Existing APIs** - Let AI tools understand and call your existing API operations using natural language β€” no code changes needed, just [configuration](https://github.com/TykTechnologies/api-to-mcp/#configuration). 
+ +- **Create a Unified Interface for AI Systems** - Standardize how APIs are accessed by AI across your organization with a consistent protocol (MCP). + +- **Control API Access for AI** - Filter which operations are available to AI, apply authentication, and monitor usage securely. + +- **Improve API Discoverability** - Enable AI systems to automatically list available endpoints, input parameters, and expected responses. + +- **Test APIs Using AI** - Use AI assistants to generate test inputs, invoke endpoints, and validate responses in a conversational way. Natural-language test generation and feedback. + +- **Auto-docs & validation** - Use AI to test, describe, or troubleshoot APIs in a conversational way. Natural-language test generation and feedback. + +- **Workflow Automation** - Connect APIs and AI logic in real time to automate workflows and streamline processes. + +--- + +## Best Practices + +- **Start small**: Only expose safe, limited endpoints +- **Use filters**: allow list or block list endpoints as needed +- **Secure your APIs**: Pass tokens, headers, or keys securely +- **Track usage**: Monitor tool access and patterns +- **Version specs**: Maintain OpenAPI version control +- **Use env vars**: Don't hardcode secrets in CLI or config +- **Validate safely**: Test in staging before going live + +--- + +## Customize your own version of the api-to-mcp tool + +If you'd like to share your MCP with a predefined OpenAPI spec and configuration, you can customize this tool to fit your needs. Useful for sharing pre-configured setups with others. + +By creating a customized version, others can use the tool with minimal configuration --- no need to manually specify specs or overlays. + +Refer to the [customization and publishing guide](https://github.com/TykTechnologies/api-to-mcp?tab=readme-ov-file#customizing-and-publishing-your-own-version) in the *api-to-mcp* repository for step-by-step instructions. 
+ +--- + +## FAQs + +**Does this work with GraphQL?** +Not currently β€” OpenAPI REST APIs only. + +**How do I secure my API requests?** +Use `--headers`, environment variables. Check the [configuration section](https://github.com/TykTechnologies/api-to-mcp/tree/main#configuration) for more details. + +**Can I hide or rename tools?** +Yes β€” use `x-mcp` extensions and filters. + +**What AI tools are supported?** +Any tool that supports MCP: Claude, Cursor, VS Code, and more. + + +## Summary + +API to MCP transforms OpenAPI specs into AI-compatible tools using the MCP standard. It enables your AI stack to dynamically understand, test, and invoke your existing APIs securely β€” without modifying your existing backend. diff --git a/ai-management/mcps/dashboard-api-to-mcp.mdx b/ai-management/mcps/dashboard-api-to-mcp.mdx new file mode 100644 index 000000000..f0a4d7009 --- /dev/null +++ b/ai-management/mcps/dashboard-api-to-mcp.mdx @@ -0,0 +1,105 @@ +--- +title: "Natural-language interaction with Tyk Dashboard (API to MCP)" +description: "Talk to Tyk Dashboard like a person using AI tools" +keywords: "AI MCP, Dashboard API-to-MCP, Tyk Dashboard API MCP, Dashboard API, Talk to Tyk Dashboard, AI Management" +sidebarTitle: "Dashboard API to MCP" +--- + +## Overview + +Use `tyk-dashboard-mcp` to expose your **Tyk Dashboard API** to AI assistants like Claude, Cursor, or VS Code extensions β€” enabling natural-language interaction with your Tyk Dashboard. + +This tool is a preconfigured fork of [api-to-mcp GitHub repository](https://github.com/TykTechnologies/api-to-mcp), designed specifically for the *Tyk Dashboard* API. It comes bundled with a predefined OpenAPI spec and overlays, so you don’t need to configure much manually. + +Explore the core functionality in the [API to MCP guide](/ai-management/mcps/api-to-mcp). + + +## Use Cases + +Once connected, you, with your AI assistants, can perform helpful actions on your Tyk Dashboard using natural language. 
For example: +- **Explore your API landscape** - List APIs, describe endpoints in plain English, review policies +- **Query Dashboard settings for audits or support tasks** - List users and keys +- **Automate admin tasks** - Create or update API definitions (e.g., OAS-based APIs) through AI-driven flows, reducing manual clicks (please note that we haven't documented this just yet) +- **Power AI developer tools** - Use this as a backend for developer assistants like Claude, Cursor, or VS Code extensions to guide devs while onboarding and using Tyk Dashboard on a daily basis. Ideal for internal use cases like AI-driven dashboards, documentation bots, or dev portals powered by LLMs. +- **Build internal chatbots** - Create internal tools that let team members ask questions like "What APIs are active?" or "What's the global rate limit defined for API X?" + + +## Setup Instructions + +**Step 1.** Use the following MCP server config for Claude Desktop, Cursor, Cline or any other MCP-compatible tool: + +```json +{ + "mcpServers": { + "tyk-dashboard-api": { + "command": "npx", + "args": [ + "-y", + "@tyk-technologies/tyk-dashboard-mcp", + "--target", + "https://your-dashboard-domain.com", + "--headers", + "Authorization: $TYK_API_KEY" + ], + "enabled": true + } + } +} +``` + +Refer to your assistant's docs for where to place this config — e.g. +- `claude_desktop_config.json` for [Claude configuration](https://modelcontextprotocol.io/quickstart/user#2-add-the-filesystem-mcp-server) +- `.cursor-config.json` for [Cursor configuration](https://docs.cursor.com/context/model-context-protocol#configuring-mcp-servers) +- `cline_mcp_settings.json` for [Cline configuration](https://docs.roocode.com/features/mcp/using-mcp-in-roo#configuring-mcp-servers) (as a VS Code extension). + +**Step 2.** +Once connected, ask your AI assistant to perform an operation (e.g., "List all APIs" or "Create a new user"). 
+ +## Examples + +Here you can see the response of asking the *Cline* in VS Code: + +1. Task: *Show me the Tyk dashboard api endpoint to create apis* + +Screenshot of the response to request of AI to create a new user + +
+ +2. Task: *Please create a new user in tyk dashboard* + +Screenshot of the AI assistant's response to the request to create a new user + +## Tips + +- You don't need to manually define an OpenAPI spec — this tool includes the official Tyk Dashboard OpenAPI spec. +- You can fork or extend the tool if you want to include additional internal APIs alongside the dashboard. +- It's open source and you can find it in the [tyk-dashboard-mcp GitHub repository](https://github.com/TykTechnologies/tyk-dashboard-mcp) + +## FAQs + +**How is this different from `api-to-mcp`?** +`tyk-dashboard-mcp` is a customized version which is preconfigured for the Tyk Dashboard API. No need to specify your own spec. + +**Does this expose all dashboard functionality?** +Only the operations defined in the OpenAPI spec. You can customize the access list to show/hide more. In the following MCP server config, the `--whitelist` option is used to only allow access to the `getApis` operation and to only allow creation of Tyk OAS definitions: + +```json +{ + "mcpServers": { + "tyk-dashboard-api": { + "command": "npx", + "args": [ + "-y", + "@tyk-technologies/tyk-dashboard-mcp", + "--target", + "https://your-dashboard-domain.com", + "--headers", + "Authorization: $TYK_API_KEY", + "--whitelist", + "getApis*,POST:/api/apis/oas" + ], + "enabled": true + } + } +} +``` diff --git a/ai-management/mcps/overview.mdx b/ai-management/mcps/overview.mdx new file mode 100644 index 000000000..54b9a4f03 --- /dev/null +++ b/ai-management/mcps/overview.mdx @@ -0,0 +1,53 @@ +--- +title: "Tyk MCPs" +description: "A comprehensive guide to Model Context Protocol (MCP) servers in Tyk and how they extend AI capabilities." +keywords: "AI MCP, MCPs in Tyk, Model Context Protocol" +sidebarTitle: "Overview" +--- + +## MCP capabilities + +[Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) servers help AI systems securely interact with external services and tools. 
They establish structured, governed connections that integrate seamlessly with your Tyk environment. + +## What are MCPs? + +Model Context Protocol (MCP) servers extend AI systems by exposing external services, tools, and resources in a standardised way. They act as bridges between AI applications and external systems, securely managing authentication, access, and execution. + +With Tyk MCPs, your AI agents can: + +- Access external data sources and APIs +- Execute specialised tools and functions +- Interact with system resources +- Retrieve contextual information + +MCPs use a defined protocol to connect AI agents with external systems, expanding AI capabilities while maintaining governance and control. + +## Why standardisation matters + +The MCP specification standardises how AI agents discover and interact with external capabilities. This helps: + +- **Simplify integration** across diverse systems +- **Enhance security** through consistent architecture +- **Promote interoperability** with different vendor solutions +- **Improve governance** when managing AI systems at scale + +## MCP for Enterprise use + + +Tyk extends the MCP model for enterprise deployments with the following capabilities: + +- **Remote MCP catalogues and server support** – Expose internal APIs and tools to AI assistants securely without requiring local installations. +- **Secure local MCP server deployment** – Deploy MCP servers within controlled environments, integrated with Tyk AI Gateway for monitoring and governance. +- **Standardised protocols** – Maintain interoperability standards for seamless integration into existing workflows. + +These features enable enterprises to scale AI integrations securely while ensuring compliance and operational control. + +## Ready-to-use MCP options + +Tyk offers several ready-to-use MCP integrations: + +- **[API to MCP](/ai-management/mcps/api-to-mcp)** – Convert existing APIs (via OpenAPI/Swagger specs) into MCP-accessible tools. 
+- **[Dashboard API to MCP](/ai-management/mcps/dashboard-api-to-mcp)** – Expose the Tyk Dashboard API for management and monitoring. +- **[Tyk Docs MCP](/ai-management/mcps/tyk-docs-mcp)** – Provide AI access to searchable Tyk documentation. + +For more information on implementing MCPs, [contact the Tyk team](https://tyk.io/contact/) to discuss your specific use cases. diff --git a/ai-management/mcps/tyk-docs-mcp.mdx b/ai-management/mcps/tyk-docs-mcp.mdx new file mode 100644 index 000000000..7ac08e5bc --- /dev/null +++ b/ai-management/mcps/tyk-docs-mcp.mdx @@ -0,0 +1,92 @@ +--- +title: "Natural-language interaction with Tyk Docs (MCP)" +description: "Talk to Tyk documentation like a person using AI tools. Use Docs MCP to enable AI assistants to search and retrieve information from Tyk documentation." +keywords: "AI MCP, Tyk Docs, AI Documentation Search, Talk to Tyk Docs" +sidebarTitle: "Tyk Docs MCP" +--- + +## Overview + +*Tyk Docs MCP* is an [MCP server](https://github.com/modelcontext/spec) tool exposing the Tyk documentation to connected AI assistants. It enables the users of AI assistants to "talk to" Tyk's documentation. Instead of searching manually, users can ask natural-language questions — and get answers backed by Tyk's official docs. The tool makes AI-assisted support, troubleshooting, and documentation exploration fast and reliable. + + +Here you can see the AI assistant choosing to use Tyk Docs MCP (*Cline* in VS Code) while answering the query *How do I set a rate limit for a Tyk API?*: + +Screenshot of the AI assistant using Tyk Docs MCP to answer a rate limit question + + +Screenshot of the AI assistant's answer to the rate limit question, backed by Tyk documentation + + +## Key Features + +- **Semantic search** — finds the most relevant content, not just keyword matches +- **Contextual results** — includes sections around your result for better understanding +- **Product filters** — limit results by product (Gateway, Dashboard, etc.) 
+- **Includes links** β€” jump straight into the relevant section of the docs +- **Answer snippets** β€” shows concise answers when possible +- **Always up to date** β€” syncs with the latest Tyk documentation + + +## Use Cases + + +Let AI do the digging β€” here’s how teams use Tyk Docs MCP: + +- **First-line support** - *How do I set a rate limit for a Tyk API?* +
AI can help answer questions about Tyk products, features, and use cases. + +- **Feature implementation help** - *How do I enable JWT authentication in Tyk Gateway?* +
AI can help developers use Tyk's features by providing ad hoc step-by-step instructions and examples. + +- **Troubleshooting guidance** - *I'm seeing 'Auth field missing' error. What does that mean?* +
AI can help identify issues and provide guidance on how to resolve them. + +- **Fast API reference** - *What fields are in the /apis response?* +
AI can help developers quickly find the information they need to implement Tyk's features. + +- **Discover what’s possible** - *What analytics tools does Tyk include?* +
AI can help developers discover new ways to use Tyk's features and capabilities. + + +## Quick Start + +The quickest way to get started is to configure your AI assistant to run it directly as an MCP tool. + +### Requirements +- [Node.js v18+](https://nodejs.org/en/download) installed +- Internet access to reach the Tyk documentation index +- MCP-compatible AI assistant that supports connecting to external MCP-compatible tool servers (e.g. Claude, Cursor, Vercel AI SDK, Cline extension in VS Code, etc.) + +### Configure your AI Assistant + +To connect the tool with MCP-compatible assistants, you need to register it as an MCP server. Most AI assistants share a similar MCP server definition. This is the definition for *docs-mcp*: + +```json +{ + "mcpServers": { + "tyk-docs-search": { + "command": "npx", + "args": [ + "-y", + "@tyk-technologies/docs-mcp@latest" + ], + "enabled": true + } + } +} +``` + +**Step 1.** +To enable the tool, paste the above configuration into your AI assistant's MCP config file: + +- **Claude Desktop**: For macOS, you need to update `~/Library/Application Support/Claude/claude_desktop_config.json`. See the [Claude Desktop setup instructions](https://github.com/TykTechnologies/api-to-mcp?tab=readme-ov-file#setting-up-in-claude-desktop) for Windows OS and more customization options. +- **Cursor**: See the [Cursor setup guide](https://github.com/TykTechnologies/api-to-mcp#cursor) for instructions on setting it up with Cursor. + +**Step 2.** +Once connected, ask the AI to perform an operation as suggested in the [use cases above](#use-cases). + + +## How It Works under the hood + +The Tyk Docs MCP tool is built on top of the [@buger/probe-docs-mcp project](https://github.com/buger/docs-mcp). This package comes with Tyk's documentation already bundled, offering a plug-and-play experience for users who want to integrate Tyk documentation into their AI assistants. 
diff --git a/ai-management/overview.mdx b/ai-management/overview.mdx new file mode 100644 index 000000000..22906c004 --- /dev/null +++ b/ai-management/overview.mdx @@ -0,0 +1,86 @@ +--- +title: "AI management" +description: "Tyk AI Management landing page. This page provides an overview of Tyk's AI management solutions including AI Studio and MCPs." +keywords: "Tyk AI management, AI Studio, Tyk MCPs" +sidebarTitle: "Overview" +--- + +As artificial intelligence becomes increasingly integrated into enterprise systems, organisations need structured, secure, and governed approaches to manage AI capabilities effectively. Tyk's AI management solutions are designed to help enterprises integrate, control, and scale AI applications while maintaining compliance and security. + +## Secure AI for the enterprise + +Tyk's AI management solutions address key challenges in AI governance, security, and integration. They enable organisations to deploy AI capabilities while maintaining oversight, managing risks, and meeting enterprise standards. + +## AI integration architecture and its importance + +Integrating AI into existing systems requires a structured architecture that connects models, APIs, and specialised tools securely and efficiently. + +A managed AI integration architecture provides: + +- **Standardisation** to ensure interoperability across AI components +- **Security** across AI workflows and data interactions +- **Governance** to monitor and control AI usage and data +- **Scalability** for enterprise-wide deployment and increasing complexity +- **Interoperability** across vendors and services + +Without a structured approach, organisations risk fragmented solutions, security gaps, and unmanaged AI usage ("shadow AI"). By integrating AI into existing systems, enterprises can achieve a more secure and efficient approach to AI management. 
+ +## Tyk’s AI management capabilities + +Tyk provides two key solutions for AI management: + +### [AI Studio](/ai-management/ai-studio/overview) + +Tyk AI Studio is a platform for managing and deploying AI applications securely and at scale. It provides: + +- **Centralised governance** with role-based access control and compliance tracking +- **Cost management** through usage monitoring and budgeting tools +- **Security features** including unified access controls and credential management +- **Developer enablement** via curated AI service catalogues +- **Collaboration tools** through intuitive AI interfaces + +AI Studio supports enterprises in reducing unauthorised AI usage by providing central management across all AI interactions. + +[Explore AI Studio](/ai-management/ai-studio/overview) + +### [Tyk MCPs](/ai-management/mcps/overview) + +The [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) provides a standardised method for AI components to interact with external resources. + +With MCPs, organisations can: + +- **Integrate securely** with external AI providers and services +- **Build custom tools** for AI assistants and workflows +- **Access resources** such as files, APIs, and databases +- **Enhance AI workflows** with contextual information + +MCPs help expand AI system functionality by enabling secure, standardised interactions between services. + +[Explore Tyk MCPs](/ai-management/mcps/overview) + +### How AI Studio and MCPs work together + +Tyk AI Studio and Tyk MCPs are complementary: + +- **AI Studio** offers governance, monitoring, and development tooling for AI management. +- **Tyk MCPs** provide secure connections to external services and APIs. + +Together, they create a flexible, governed framework for managing AI applications at scale. + +## Next steps + +To start using Tyk's AI management capabilities: + +1. Explore the [AI Studio documentation](/ai-management/ai-studio/overview) +2. 
Review [Tyk MCPs](/ai-management/mcps/overview) and how they extend AI systems +3. [Request a demo](https://tyk.io/ai-demo/) to see the platform in action. + +## Key outcomes + +Tyk's AI management solutions are designed to: + +- **Reduce risk** through centralised access and monitoring +- **Improve efficiency** across AI development workflows +- **Enhance cost control** with usage and budgeting insights +- **Support compliance** with data protection and security standards +- **Enable scalable architectures** based on open protocols diff --git a/api-management/api-versioning.mdx b/api-management/api-versioning.mdx new file mode 100644 index 000000000..dbedf6f98 --- /dev/null +++ b/api-management/api-versioning.mdx @@ -0,0 +1,631 @@ +--- +title: "API Versioning" +description: "Create and manage multiple versions of an API" +keywords: "API versioning, version, Tyk Classic, Tyk OAS, API, versioning" +sidebarTitle: "API Versioning" +--- + +## Introduction + +API versioning is a crucial practice in API development and management that allows you to evolve your API over time while maintaining backward compatibility for existing clients. As your API grows and changes, versioning provides a structured way to introduce new features, modify existing functionality, or deprecate outdated elements without breaking integrations for users who rely on previous versions. + +API versioning is important for several reasons: +- **Flexibility**: It allows you to improve and expand your API without disrupting existing users. +- **Stability**: Clients can continue using a specific version of the API, ensuring their applications remain functional. +- **Transition management**: You can gradually phase out older versions while giving clients time to migrate to newer ones. +- **Documentation**: Each version can have its own documentation, making it easier for developers to understand the specific capabilities and limitations of the version they're using. 
+ +--- + +## When to use API versioning + +There are many occasions when you might use versioning with your APIs, here are just a few examples. + +### Adding new features + +Imagine you're running an e-commerce API, and you want to introduce a new recommendation engine. Instead of modifying the existing endpoint and potentially breaking current integrations, you could create a new version of the API that includes this feature. This allows you to roll out the enhancement to interested clients while others continue using the previous version without disruption. + +### Changing response formats + +Let's say you have a weather API that currently returns temperatures in Fahrenheit. You decide to switch to Celsius for international standardization. By creating a new API version with this change, you can transition to the new format without affecting existing users who expect Fahrenheit readings. This gives clients time to adapt their applications to handle the new response format. + +### Deprecating outdated functionality + +If your financial API includes a legacy payment processing method that you plan to phase out, versioning allows you to create a new version without this feature. You can then encourage users to migrate to the new version over time, eventually deprecating the old version containing the outdated functionality. + +### Optimizing performance + +You might discover a more efficient way to structure your API requests and responses. By introducing these optimizations in a new version, you can offer improved performance to clients who are ready to upgrade, while maintaining the existing version for those who aren't prepared to make changes yet. + +## Sunsetting API versions + +API sunsetting is the process of phasing out or retiring an older version of an API or an entire API. It's a planned, gradual approach to ending support for an API or API version. 
To aid with the automation of this process, all Tyk API versions can be configured with an optional expiry date `expiration` (`expires` for Tyk Classic APIs), after which the API will no longer be available. If this is left blank then the API version will never expire. This is configured in standard ISO 8601 format. + +When sunsetting API versions, you may have endpoints that become deprecated between versions. It can be more user friendly to retain those endpoints but return a helpful error, instead of just returning `HTTP 404 Not Found`. + +This is easy to do with Tyk. You could, for example, include the deprecated endpoint in the new version of the API and configure the [mock response](/api-management/traffic-transformation/mock-response) middleware to provide your clients with relevant information and instruction. Alternatively, you could return a `HTTP 302 Found` header and redirect the user to the new endpoint. + +--- + +## How API versioning works with Tyk + +API versioning is supported for all APIs that can be deployed on Tyk, both Tyk OAS APIs (for REST and Streaming services) and Tyk Classic APIs (used for GraphQL and TCP services). There are differences in the approach and options available when using the two API formats: + +- With Tyk OAS APIs you're essentially creating distinct iterations of your API, each with its own API definition file, allowing almost complete differentiation of configuration between your API versions. +- With Tyk Classic APIs all versions of an API are configured from a single API definition. This means that they share many features with only a subset available to be configured differently between versions. + +For more details, see [this comparison](#comparison-between-tyk-oas-and-tyk-classic-api-versioning). 
+ +Some key concepts that are important to understand when versioning your APIs are: + +- [Version identifiers](/api-management/api-versioning#version-identifiers) +- [Default version](/api-management/api-versioning#default-version) +- [Base and child versions](/api-management/api-versioning#base-and-child-apis) +- [Controlling access to versions](/api-management/api-versioning#controlling-access-to-versioned-apis) + +### Version Identifiers + +The version identifier is the method by which the API client specifies which version of an API it is addressing with each request. Tyk supports multiple [locations](/api-management/api-versioning#version-identifier-location) within the request where this identifier can be placed. Typically the value assigned in the version identifier will be matched to the list of versions defined for the API and, assuming that the client is [authorized to access](/api-management/api-versioning#controlling-access-to-versioned-apis) that version, Tyk will apply the version specific processing to the request. + +#### Version identifier location + +Tyk supports three different locations where the client can indicate which version of an API they wish to invoke with their request: + +- [Request URL (path)](/api-management/api-versioning#request-url-path) +- [Query parameter](/api-management/api-versioning#query-parameter) +- [Request header](/api-management/api-versioning#request-header) + +When choosing a version identifier location, consider your API design philosophy, infrastructure requirements, client needs, caching strategy, and backward compatibility concerns. Whichever method you choose, aim for consistency across your API portfolio to provide a uniform experience for your API consumers. + +##### Request URL (path) + +Including the version identifier in the path (for example `/my-api/v1/users`) is a widely used approach recognized in many API designs. 
The version identifier is clearly visible in the request and, with the unique URL, can simplify documentation of the different versions. Tyk can support the version identifier as the **first URL fragment** after the listen path, such that the request will take the form `//`. + +##### Query parameter + +Defining a query parameter that must be provided with the request (for example `/my-api/users?version=v1`) is easy to implement and understand. The version identifier is clearly visible in the request and can be easily omitted to target a default version. Many analytics tools can parse query parameters, making this a very analytics-friendly approach to versioning. + +##### Request header + +Defining a specific header that must be provided with the request (for example `x-api-version:v1`) keeps the URL *clean*, which can be aesthetically pleasing and easier to read. It works well with RESTful design principles, treating the version as metadata about the request and allows for flexibility and the ability to make changes to the versioning scheme without modifying the URL structure. Headers are less visible to users than the request path and parameters, providing some security advantage. Be aware that other proxies or caches might not consider headers for routing, which could bring issues with this method. + +#### Stripping version identifier + +Typically Tyk will pass all request headers and parameters to the upstream service when proxying the request. For a versioned API, the version identifier (which may be in the form of a header, path parameter or URL fragment) will be included in this scope and passed to the upstream. + +The upstream (target) URL will be constructed by combining the configured `upstream.url` (`target_url` for Tyk Classic APIs) with the full request path unless configured otherwise (for example, by using the [strip listen path](/api-management/gateway-config-tyk-oas#listenpath) feature). 
+ +If the version identifier is in the request URL then it will be included in the upstream (target) URL. If you don't want to include this identifier, then you can set `stripVersioningData` (`strip_versioning_data` for Tyk Classic APIs) and Tyk will remove it prior to proxying the request. + +#### Version identifier pattern + +When using the [Request URL](/api-management/api-versioning#request-url-path) for the versioning identifier, if Tyk is configured to strip the versioning identifier then the first URL fragment after the `listenPath` (`listen_path` for Tyk Classic APIs) will be deleted prior to creating the proxy URL. If the request does not include a versioning identifier and Tyk is configured to [fallback to default](/api-management/api-versioning#fallback-to-default), this may lead to undesired behaviour as the first URL fragment of the endpoint will be deleted. + +In Tyk 5.5.0 we implemented a new configuration option `urlVersioningPattern` (`url_versioning_pattern` for Tyk Classic APIs) to the API definition where you can set a regex that Tyk will use to determine whether the first URL fragment after the `listenPath` is a version identifier. If the first URL fragment does not match the regex, it will not be stripped and the unaltered URL will be used to create the upstream URL. + +### Default version + +When multiple versions are defined for an API, one must be declared as the **default version**. If a request is made to the API without providing the version identifier, then this will automatically be treated as a request to the *default* version. This has been implemented to support future versioning of an originally unversioned API, as you can continue to support legacy clients with the default version. + +Tyk makes it easy for you to specify - and change - the *default* version for your APIs. 
+ +#### Fallback to default + +The standard behaviour of Tyk, if an invalid version is requested in the version identifier, is to reject the request returning `HTTP 404 This API version does not seem to exist`. Optionally, Tyk can be configured to redirect these requests to the *default* version by configuring the `fallbackToDefault` option in the API definition (`fallback_to_default` for Tyk Classic APIs). + + +### Base and child APIs + +Tyk OAS introduces the concept of a **Base API**, which acts as a *parent* that routes requests to the different *child* versions of the API. The Base API stores the information required for Tyk Gateway to locate and route requests to the appropriate *child* APIs. + +The *child* APIs do not have any reference back to the *parent* and so can operate completely independently if required. Typically, and we recommend, the *child* versions should be configured as Internal APIs that are not directly reachable by clients outside Tyk. + +The Base API is a working version of the API and is usually the only one configured as an *External API*, so that client requests are handled (and routed) according to the configuration set in the Base API (via the version identifier included in the header, url or query parameter). + +You can configure a Tyk OAS API as a *Base* API by adding the `versioning` object to the `info` section in the Tyk Vendor Extension. This is where you will configure all the settings for the versioned API. The *child* APIs do not contain this information. + +Note that any version (*child* or *Base*) can be set as the [default version](/api-management/api-versioning#default-version). + +Tyk Classic APIs do not have the base and child concept because all versions share an API definition. 
+ +### Controlling access to versioned APIs + +Tyk's access control model supports very granular permissions to versioned APIs using the `access_rights` assigned in the [access keys/tokens](/api-management/policies#what-is-a-session-object) or [security policies](/api-management/policies#what-is-a-security-policy) that are applied to keys. + +This means that you could restrict client access to only the [Base API](/api-management/api-versioning#base-and-child-apis), while allowing developers to create and test new versions independently. These will only be added to the "routing table" in the Base API when the API owner is ready and access keys could then be updated to grant access to the new version(s). + +Note that an access key will only have access to the `default` version if it explicitly has access to that version (e.g. if `v2` is set as default, a key must have access to `v2` to be able to [fallback to the default](/api-management/api-versioning#fallback-to-default) if the versioning identifier is not correctly provided in the request. + +When using Tyk OAS APIs each version of the API has a unique [API Id](/api-management/gateway-config-tyk-oas#info), so you simply need to identify the specific versions in the `access_rights` list in the key in the same way that you would add multiple different APIs to a single key. + + + +Creating a new version of a Tyk OAS API will not affect its API Id, so any access keys that grant access to the API will continue to do so, however they will not automatically be granted access to the new version (which will have a new API Id). + + + +When using Tyk Classic APIs you can explicitly grant access to specific versions of an API by specifying only those versions in the `versions` list in the key within the single entry for the API in the `access_rights` list. 
+ +### Comparison between Tyk OAS and Tyk Classic API versioning + +As explained, there are differences between the way that versioning works for Tyk OAS and Tyk Classic APIs. + +These are largely due to the fact that a separate API definition is generated for each version of a Tyk OAS API, with one designated as the [base](/api-management/api-versioning#base-and-child-apis) version (which should be exposed on Tyk Gateway with the other (child) versions set to [internal](/api-management/traffic-transformation/internal-endpoint) visibility) whereas all versions of a Tyk Classic API are described by a single API definition. + +The Tyk Classic approach limits the range of features that can differ between versions. + +This table gives an indication of some of the features that can be configured per-version (βœ…) or only per-API (❌️) for Tyk OAS and Tyk Classic APIs. + +| Feature | Configurable in Tyk OAS versioning | Configurable in Tyk Classic versioning | +| :--------- | :------------------------------------ | :---------------------------------------- | +| Client-Gateway security | βœ… | ❌️ | +| Request authentication method | βœ… | ❌️ | +| API-level header transform | βœ… | βœ… | +| API-level request size limit | βœ… | βœ… | +| API-level rate limiting | βœ… | ❌️ | +| API-level caching | βœ… | ❌️ | +| Endpoints (method and path) | βœ… | βœ… | +| Per-endpoint middleware | βœ… | βœ… | +| Context and config data for middleware | βœ… | ❌️ | +| Custom plugin bundle | βœ… | ❌️ | +| Upstream target URL | βœ… | βœ… | +| Gateway-Upstream security | βœ… | ❌️ | +| Traffic log config | βœ… | ❌️ | +| API segment tags | βœ… | ❌️ | + +--- + +## Configuring API versioning in the API definition + +You can configure a Tyk OAS API as a [Base API](/api-management/api-versioning#base-and-child-apis) by adding the `info.versioning` [object](/api-management/gateway-config-tyk-oas#versioning) to the [Tyk Vendor Extension](/api-management/gateway-config-tyk-oas#tyk-vendor-extension). 
+ +Some notes on this: + +- if the *base* version is to be used as the *default* then you can use the value `self` as the identifier in the `default` field +- in the `versions` field you must provide a list of key-value pairs containing details of the *child* versions: + - `id`: the unique API Id (`x-tyk-api-gateway.info.id`) assigned to the API (either automatically by Tyk or user-defined during API creation) + - `name`: an identifier for this version of the API, for example `v2` + +The *child API* does not require any modification to its API definition. The important thing is that its API Id must be added to the `versions` list in the *base API* definition. We strongly recommend, however, that you configure `info.state.internal` to `true` for all child APIs so that they can only be accessed via the *base API*. + + + +If you are using Tyk Classic APIs, please see [this section](/api-management/api-versioning#versioning-with-tyk-classic-apis). + + + +### Example Tyk OAS Base API + +In the following example, we configure a *Base API*: + +```json {hl_lines=["11-27"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-base-api", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "components": {}, + "x-tyk-api-gateway": { + "info": { + "versioning": { + "default": "v1", + "enabled": true, + "key": "x-api-version", + "location": "header", + "name": "v1", + "versions": [ + { + "id": "", + "name": "v2" + } + ], + "fallbackToDefault": true, + "stripVersioningData": false, + "urlVersioningPattern": "" + }, + "expiration": "2030-01-01 00:00", + "name": "example-base-api", + "state": { + "active": true, + "internal": false + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/example-base-api/" + } + }, + "upstream": { + "url": "http://httpbin.org/" + } + } +} +``` + +This API definition will configure Tyk Gateway to expect the `x-api-version` header to be provided and will invoke a version of the API as follows: +- if the header key 
has the value `v1` then the Base API will be processed +- if it is `v2` then the request will be forwarded internally to the API with API Id `` +- if any other value is provided in the header, then the `default` version will be used (in this instance, the Base API) because `fallbackToDefault` has been configured +- if the header is not provided, then the request will be handled by the `default` version (in this instance the Base API) + +This API version will automatically expire on the 1st January 2030 and stop accepting requests. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out API versioning - though it requires a valid API Id to be used in place of ``. + +--- + +## API versioning in the Tyk Dashboard API Designer + +You can use the API Designer in the Tyk Dashboard to manage versions for your APIs. + +### Configure versioning + +From Tyk 5.10, you can pre-configure the [versioning metadata](#how-api-versioning-works-with-tyk) for an API before you've created the first [child API](/api-management/api-versioning#base-and-child-apis). + +1. Choose the API for which you want to create a new version (this can be an unversioned or versioned API) and go to the **Versions** tab + + An unversioned Tyk OAS API in the API Designer + +2. Select **Edit** and you can pre-fill the following metadata: + + - [Version identifier location](/api-management/api-versioning#version-identifier-location) + - Version identifier name (or [pattern](/api-management/api-versioning#version-identifier-pattern) for URL versioning) + - Version name for the base API + - [Stripping identifier from upstream request](/api-management/api-versioning#stripping-version-identifier) + - [Default fallback behaviour](/api-management/api-versioning#fallback-to-default) + +3. Click **Save API** + +### Create a new child version + +You can easily add a new version in the Tyk Dashboard's API Designer by following these steps. + +1. 
Choose the API for which you want to create a new version (this can be an unversioned or versioned API) and go to the **Versions** tab
+
+2. Select **Add New Version** to open the version creation wizard.
+
+    Creating a new version of a Tyk OAS API using the version creation wizard
+
+    Choose whether to start from an existing API configuration or to start from a blank API template. If there's already at least one child version, you can select which version (child or base) you wish to use as the template for your new API.
+
+3. If you have not already [configured](/api-management/api-versioning#configure-versioning) the versioning for this API you will be prompted to complete this information.
+
+    Configuring the version identifier from the version creation wizard
+
+    If you've already done this, then you'll just need to provide a unique identifier name for your new child API and choose whether to make the new version the default choice.
+
+    Configuring the version identifier from the version creation wizard
+
+
+4. The final step is to choose whether to publish your new version straight away or to keep it in draft until you've completed configuring it. You can also optionally choose to make the API externally accessible, which will allow direct calls to the child API not just via the base API.
+
+    Configuring the version identifier from the version creation wizard
+
+5. Select **Create Version** to complete the wizard. Your new API will be created and you can now adjust the configuration as required.
+
+
+### Working with versioned APIs
+
+When you have a versioned API the *Base API* will appear in the **Created APIs** list, with an expansion icon that you can select to reveal the versions.
+
+Versioned API shows in the Created APIs list
+
+Note that the *base* and *default* versions are highlighted with appropriate labels. 
You can reach the API Designer for each version in the usual way, by selecting the API name in the list (or from the **Actions** menu for the API). + +#### Switch between API versions + +When you are in the API Designer for a versioned API, you can switch between versions using the drop-down next to the API name. + +Choose between API versions in the API designer + +#### Manage version settings + +You can manage all versions of your API from the **Versions** tab + +Manage the different versions of your API + +- You can see the versioning metadata + - enter **Edit** mode to make changes + - note that the common metadata can only be edited from the base API. +- You can see a list of all versions for the API and, from the **Actions** menu: + - go directly to that version + - delete that version + - set the *default* version + +--- + +## Versioning with Tyk Classic APIs + +All configuration for versioning of Tyk Classic APIs is documented [here](/api-management/gateway-config-tyk-classic#tyk-classic-api-versioning). 
+ +### Example versioned Tyk Classic API + +Here's an example of the minimal configuration that would need to be added to the API definition for a Tyk Classic API with two versions (`v1` and `v2`): + +```json {linenos=true, linenostart=1} +{ + "version_data": { + "not_versioned": false, + "default_version": "v1", + "versions": { + "v1": { + "name": "v1", + "expires": "", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [], + "transform": [], + "transform_response": [], + "transform_jq": [], + "transform_jq_response": [], + "transform_headers": [], + "transform_response_headers": [], + "hard_timeouts": [], + "circuit_breakers": [], + "url_rewrites": [], + "virtual": [], + "size_limits": [], + "method_transforms": [], + "track_endpoints": [], + "do_not_track_endpoints": [], + "validate_json": [], + "internal": [], + "persist_graphql": [] + }, + "global_headers": {}, + "global_headers_remove": [], + "global_headers_disabled": false, + "global_response_headers": {}, + "global_response_headers_remove": [], + "global_response_headers_disabled": false, + "ignore_endpoint_case": false, + "global_size_limit": 0, + "override_target": "" + }, + "v2": { + "name": "v2", + "expires": "", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [], + "transform": [], + "transform_response": [], + "transform_jq": [], + "transform_jq_response": [], + "transform_headers": [], + "transform_response_headers": [], + "hard_timeouts": [], + "circuit_breakers": [], + "url_rewrites": [], + "virtual": [], + "size_limits": [], + "method_transforms": [], + "track_endpoints": [], + "do_not_track_endpoints": [], + "validate_json": [], + "internal": [], + "persist_graphql": [] + }, + "global_headers": {}, + "global_headers_remove": [], + 
"global_headers_disabled": false, + "global_response_headers": {}, + "global_response_headers_remove": [], + "global_response_headers_disabled": false, + "ignore_endpoint_case": false, + "global_size_limit": 0, + "override_target": "http://httpbin.org/ip" + } + } + }, + "definition": { + "location": "header", + "key": "x-api-version", + "strip_versioning_data": false, + "fallback_to_default": true, + "url_versioning_pattern": "" + } +} +``` + +In this example, there are two versions of the API +- the version identifier is expected in a request header `x-api-version` +- the versions are named `v1` and `v2` +- the only difference between `v1` and `v2` is that `v2` will proxy the request to a different upstream via the configured `override_target` +- the default version (`default_version`) is `v1` +- if the request header contains an invalid version named (e.g. `v3`), it will be directed to the default (`fallback_to_default:true`) + +### Tyk Classic API versioning in the API Designer + +You can use the API Designer in the Tyk Dashboard to add and manage versions for your Tyk Classic APIs. + +#### Create a versioned API + +1. **Enable versioning** + + In the API Designer, navigate to the **Versions** tab. + + Enabling versioning for a Tyk Classic API + + Deselect the **Do not use versioning** checkbox to enable versioning and display the options. + +2. **Configure the versioning identifier** + + Choose from the drop-down where the version identifier will be located and, if applicable, provide the key name (for query parameter or request header locations). + + Configuring the versioning identifier + +3. **Add a new version** + + You will see the existing (`Default`) version of your API in the **Versions List**. You can add a new version by providing a version name (which will be the value your clients will need to provide in the version location when calling the API). 
+ + You can optionally configure an **Override target host** that will replace the target path that was set in the base configuration for the version. Note that this is not compatible with Service Discovery or Load Balanced settings. + + Select **Add** to create this new version for your API. + + Adding a new version to your API + +4. **Set the default version** + + You can choose any of your API versions to act as the [default](/api-management/api-versioning#default-version). + + Choosing the default version for your API + + Select **Update** to save the changes to your API. + +#### Switch between versions of a Tyk Classic API + +When you are in the API Designer for a versioned Tyk Classic API, you can switch between versions from the **Edit Version** dropdown in the **Endpoint Designer** tab. + +Choosing the API version for which to configure endpoint middleware + +Remember to select **Update** to save the changes to your API. + +### Configuring Tyk Classic API versioning in Tyk Operator + + +When using Tyk Operator, you can configure versioning for a Tyk Classic API within `spec.definition` and `spec.version_data`. 
+ +In the following example: + +- the version identifier is a header with the name `x-api-version` (comments demonstrate how to configure the alternative version identifier locations) +- the API has one version with the name `v1` +- the default version is set to `v1` +- an allow list, block list and ignore authentication middleware have been configured for version `v1` +- an alternative upstream URL (`override_target`) is configured for `v1` to send requests to `http://test.org` + +```yaml {linenos=table,hl_lines=["14-17", "25-27", "29-82"], linenostart=1} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: versioned-api +spec: + name: Versioned API + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://version-api.example.com + listen_path: /version-api + strip_listen_path: true + definition: + # Tyk should find version data in Header + location: header + key: x-api-version + + # Tyk should find version data in First URL Element + #location: url + + # Tyk should find version data in URL/Form Parameter + #location: url-param + #key: api-version + version_data: + default_version: v1 + not_versioned: false + versions: + v1: + name: v1 + expires: "" + override_target: "http://test.org" + use_extended_paths: true + extended_paths: + ignored: + - path: /v1/ignored/noregex + method_actions: + GET: + action: no_action + code: 200 + data: "" + headers: + x-tyk-override-test: tyk-override + x-tyk-override-test-2: tyk-override-2 + white_list: + - path: v1/allowed/allowlist/literal + method_actions: + GET: + action: no_action + code: 200 + data: "" + headers: + x-tyk-override-test: tyk-override + x-tyk-override-test-2: tyk-override-2 + - path: v1/allowed/allowlist/reply/{id} + method_actions: + GET: + action: reply + code: 200 + data: flump + headers: + x-tyk-override-test: tyk-override + x-tyk-override-test-2: tyk-override-2 + - path: v1/allowed/allowlist/{id} + method_actions: + GET: + action: no_action + code: 200 + data: "" 
+ headers: + x-tyk-override-test: tyk-override + x-tyk-override-test-2: tyk-override-2 + black_list: + - path: v1/disallowed/blocklist/literal + method_actions: + GET: + action: no_action + code: 200 + data: "" + headers: + x-tyk-override-test: tyk-override + x-tyk-override-test-2: tyk-override-2 +``` + +## Creating Versioned APIs via the Tyk Dashboard and Gateway APIs + +As explained, you can directly [configure the version settings](/api-management/api-versioning#configuring-api-versioning-in-the-api-definition) within the Tyk OAS API definition using the `info.versioning` section of the Tyk Vendor Extension. + +Alternatively, Tyk can look after the linking of base and child versions if you are using the Tyk Dashboard API or Tyk Gateway API to manage your Tyk OAS APIs. + +If you are using Tyk Classic, then you should configure versioning within the API definition prior to creating the API. + +### Creating Base Version + +When creating the [base version](/api-management/api-versioning#base-and-child-apis) of your API, you do not need to do anything special - the version details will be added when you later create the first child version. + +### Creating Child Versions + +When you want to create a [child version](/api-management/api-versioning#base-and-child-apis) for an existing API using the Tyk Dashboard API or Tyk Gateway API, you must provide additional query parameters to link the child and base APIs. + +These parameters are common to the `POST /api/apis/oas` and `POST /tyk/apis/oas` endpoints: + +- `base_api_id`: The API ID of the Base API to which the new version will be linked. +- `base_api_version_name`: The version name of the base API while creating the first version. This doesn't have to be sent for the next versions but if it is set, it will override the base API version name. +- `new_version_name`: The version name of the created version. +- `set_default`: If true, the new version is set as default version. 
+ +These options are also available when [updating an existing API definition](/api-management/gateway-config-managing-oas#updating-an-api) using the `PATCH /api/apis/oas` or `PATCH /tyk/apis/oas` endpoints. + +#### Version Settings + +When using the Tyk Gateway API or Tyk Dashboard API to create new child versions, the default versioning settings will be: + +- versioning identifier location: header +- versioning identifier key: `x-tyk-version` + +If you need to change these, you should do so within the `info.versioning` section of the API definition for the base version as explained [previously](/api-management/api-versioning#configuring-api-versioning-in-the-api-definition). diff --git a/api-management/authentication/basic-authentication.mdx b/api-management/authentication/basic-authentication.mdx new file mode 100644 index 000000000..e382a7a63 --- /dev/null +++ b/api-management/authentication/basic-authentication.mdx @@ -0,0 +1,194 @@ +--- +title: "Basic Authentication" +description: "How to configure basic authentication in Tyk?" +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Secure APIs, Basic Authentication" +sidebarTitle: "Basic Authentication" +--- + +## What is Basic Authentication? + +Basic Authentication is a straightforward authentication method where the user's credentials (username and password) are sent to the server, usually in a standard HTTP header. + +## How does Basic Authentication Work? 
+ +The user credentials are combined and encoded in this form: + +``` +Basic base64Encode(username:password) +``` + +A real request could look something like: + +``` +GET /api/widgets/12345 HTTP/1.1 +Host: localhost:8080 +Authorization: Basic am9obkBzbWl0aC5jb206MTIzNDU2Nw== +Cache-Control: no-cache +``` + +In this example the username is `john@smith.com` and the password is `1234567` (see [base64encode.org](https://www.base64encode.org)) + +### The Problem with Basic Authentication + +With Basic Authentication, the authentication credentials are transferred from client to server as encoded plain text. This is not a particularly secure way to transfer the credentials as it is highly susceptible to intercept; as the security of user authentication is usually of critical importance to API owners, Tyk recommends that Basic Authentication should only ever be used in conjunction with additional measures, such as [mTLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls). + +## Configuring your API to use Basic Authentication + +The OpenAPI Specification indicates the use of [Basic Authentication](https://swagger.io/docs/specification/v3_0/authentication/basic-authentication/) in the `components.securitySchemes` object using the `type: http` and `scheme: basic`: + +```yaml +components: + securitySchemes: + myAuthScheme: + type: http + scheme: basic + +security: + - myAuthScheme: [] +``` + +With this configuration provided by the OpenAPI description, all that is left to be configured in the Tyk Vendor Extension is to enable authentication, to select this security scheme and to indicate where Tyk should look for the credentials. Usually the credentials will be provided in the `Authorization` header, but Tyk is configurable, via the Tyk Vendor Extension, to support custom header keys and credential passing via query parameter or cookie. 
+
+```yaml
+x-tyk-api-gateway:
+  server:
+    authentication:
+      enabled: true
+      securitySchemes:
+        myAuthScheme:
+          enabled: true
+          header:
+            enabled: true
+            name: Authorization
+```
+
+Note that URL query parameter keys and cookie names are case sensitive, whereas header names are case insensitive.
+
+You can optionally [strip the user credentials](/api-management/client-authentication#managing-authorization-data) from the request prior to proxying to the upstream using the `authentication.stripAuthorizationData` field (Tyk Classic: `strip_auth_data`).
+
+### Multiple User Credential Locations
+
+The OpenAPI Specification's `securitySchemes` mechanism allows only one location for the user credentials, but in some scenarios an API might need to support multiple potential locations to support different clients.
+
+The Tyk Vendor Extension supports this by allowing configuration of alternative locations in the basic auth entry in `server.authentication.securitySchemes`. Building on the previous example, we can add optional query and cookie locations as follows:
+
+```yaml
+x-tyk-api-gateway:
+  server:
+    authentication:
+      enabled: true
+      securitySchemes:
+        myAuthScheme:
+          enabled: true
+          header:
+            enabled: true
+            name: Authorization
+          query:
+            enabled: true
+            name: query-auth
+          cookie:
+            enabled: true
+            name: cookie-auth
+```
+
+### Extract Credentials from the Request Payload
+
+In some cases, for example when dealing with SOAP, user credentials can be passed within the request body rather than in the standard Basic Authentication format. You can configure Tyk to handle this situation by extracting the username and password from the body using regular expression matching (regexps). 
+ +You must instruct Tyk to check the request body by adding the `extractCredentialsFromBody` field to the basic auth entry in `server.authentication.securitySchemes`, for example: + +```yaml +x-tyk-api-gateway: + server: + authentication: + enabled: true + securitySchemes: + myAuthScheme: + enabled: true + extractCredentialsFromBody: + enabled: true + userRegexp: '(.*)' + passwordRegexp: '(.*)' +``` + +Note that each regexp should contain only one match group, which must point to the actual values of the user credentials. + +### Caching User Credentials + +The default behaviour of Tyk's Basic Authentication middleware is to cache user credentials, improving the performance of the authentication step when a client makes frequent requests on behalf of the same user. + +When a request is received, it presents credentials which are checked against the users registered in Tyk. When a match occurs and the request is authorized, the matching credentials are stored in a cache with a configurable refresh period. When future requests are received, Tyk will check the presented credentials against those in the cache first, before checking the full list of registered users. + +The cache will refresh after `cacheTTL` seconds (Tyk Classic: `basic_auth.cache_ttl`). + +If you do not want to cache user credentials, you can turn this off using `disableCaching` in the basic auth entry in `server.authentication.securitySchemes` (Tyk Classic: `basic_auth.disable_caching`). + +### Using Tyk Classic APIs + +As noted in the Tyk Classic API [documentation](/api-management/gateway-config-tyk-classic#configuring-authentication-for-tyk-classic-apis), you can select Basic Authentication using the `use_basic_auth` option. This will default to expect the user credentials in the `Authorization` header. 
+ + +## Using Tyk Dashboard to Configure Basic Authentication + +Using the Tyk Dashboard, you can configure the Basic Authentication method from the Server section in the API Designer by enabling **Authentication** and selecting **Basic Authentication** from the drop-down: + +Target Details: Basic Auth + +- select the location(s) where Tyk should look for the token +- provide the key name for each location (we prefill the default `Authorization` for the *header* location, but you can replace this if required) +- optionally select [strip authorization data](/api-management/client-authentication#managing-authorization-data) to remove the auth token locations from the request prior to proxying to the upstream +- optionally configure the [basic authentication cache](/api-management/authentication/basic-authentication#caching-user-credentials) +- optionally configure [extraction of credentials from the request body](#extract-credentials-from-the-request-payload) + +## Registering Basic Authentication User Credentials with Tyk + +When using Basic Authentication, the API key used to access the API is not generated by the Tyk system, instead you need to create and register the credentials of your users with Tyk. Tyk will compare the credentials provided in the request against the list of users you have created. + +The way that this is implemented is through the creation of a key that grants access to the API (as you would for an API protected by [auth token](/api-management/authentication/bearer-token)), however for this key you will provide a username and password. + +When calling the API, users would never use the key itself as a token, instead their client must provide the Basic Auth credentials formed from the registered username and password, as [described previously](#how-does-basic-authentication-work). + + +### Using Tyk Dashboard UI + +You can use the Tyk Dashboard to register a user's Basic Authentication credentials that can then be used to access your API. 
+
+Navigate to the **Keys** screen and select **Add Key**.
+
+Follow the instructions in the [access key guide](/api-management/gateway-config-managing-classic#access-an-api) and you'll notice that, when you select the Basic Auth protected API, a new **Authentication** tab appears:
+
+Note that the **Authentication** tab will also be displayed if you create a key from a policy that grants access to a Basic Auth protected API.
+
+Complete the user's credentials on this tab and create the key as normal. The key that is created in Tyk Dashboard is not in itself an access token (that is, it cannot be used directly to gain access to the API) but is used by Tyk to validate the credentials provided in the request and to determine the appropriate authorization, including expiry of authorization.
+
+### Using the Tyk Dashboard API
+
+You can register user credentials using the `POST /api/apis/keys/basic/{username}` endpoint in the [Tyk Dashboard API](/tyk-dashboard-api). The request payload is a [Tyk Session Object](/api-management/policies#what-is-a-session-object) (access key).
+
+- the user's *username* is provided as a path parameter
+- the user's *password* is provided as `basic_auth_data.password` within the request payload
+
+You use the `POST` method to create a new user and `PUT` to update an existing entry.
+
+
+
+Be careful to ensure that the `org_id` is set correctly and consistently so that the Basic Authentication user is created in the correct organization.
+
+
+
+### Using the Tyk Gateway API
+
+You can register user credentials using the `POST /tyk/keys/{username}` endpoint in the [Tyk Gateway API](/tyk-gateway-api). The request payload is a [Tyk Session Object](/api-management/policies#what-is-a-session-object) (access key). 
+ +- the user's *username* is provided as a path parameter +- the user's *password* is provided as `basic_auth_data.password` within the request payload + +You use the `POST` method to create a new user and `PUT` to update an existing entry. + + + +Be careful to ensure that the `org_id` is set correctly and consistently so that the Basic Authentication user is created in the correct organization. + + + + diff --git a/api-management/authentication/bearer-token.mdx b/api-management/authentication/bearer-token.mdx new file mode 100644 index 000000000..9dd3ba1dd --- /dev/null +++ b/api-management/authentication/bearer-token.mdx @@ -0,0 +1,105 @@ +--- +title: "Bearer Tokens" +description: "How to configure bearer tokens in Tyk?" +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Secure APIs, Bearer Tokens" +sidebarTitle: "Bearer Tokens" +--- + +## What is a bearer token ? + +> Any party in possession of an auth (or bearer) token (a "bearer") can use it to get access to the associated resources (without demonstrating possession of a cryptographic key). To prevent misuse, auth tokens need to be protected from disclosure in storage and in transport. + +Tyk provides auth (bearer) token access as one of the most convenient building blocks for managing security to your API. Tokens are added to a request as a header or as a query parameter. If added as a header, they may be preceded by the word "Bearer" to indicate their type, though this is optional. Usually these tokens are provided in the `Authorization` header, however Tyk can be configured to accept the token in a different header, as a query parameter or in a cookie. + +## Configuring your API to use Auth Token + +The OpenAPI Specification indicates the use of [Auth Token](https://swagger.io/docs/specification/v3_0/authentication/bearer-authentication/) in the `components.securitySchemes` object using `type: apiKey`. 
It also includes specification of the location (`in`) and key (`name`) that are to be used when providing the token to the API, for example: + +```yaml +components: + securitySchemes: + myAuthScheme: + type: apiKey + in: header + name: Authorization + +security: + - myAuthScheme: [] +``` + +With this configuration provided by the OpenAPI description, all that is left to be configured in the Tyk Vendor Extension is to enable authentication and to select this security scheme. + +```yaml +x-tyk-api-gateway: + server: + authentication: + enabled: true + securitySchemes: + myAuthScheme: + enabled: true +``` + +Note that URL query parameter keys and cookie names are case sensitive, whereas header names are case insensitive. + +You can optionally [strip the auth token](/api-management/client-authentication#managing-authorization-data) from the request prior to proxying to the upstream using the `authentication.stripAuthorizationData` field (Tyk Classic: `strip_auth_data`). + +## Multiple Auth Token Locations + +The OpenAPI Specification's `securitySchemes` mechanism allows only one location for the auth token, but in some scenarios an API might need to support multiple potential locations to support different clients. + +The Tyk Vendor Extension supports this by allowing configuration of alternative locations in the auth token entry in `server.authentication.securitySchemes`. Building on the previous example, we can add optional query and cookie locations as follows: + +```yaml +x-tyk-api-gateway: + server: + authentication: + enabled: true + securitySchemes: + myAuthScheme: + enabled: true + query: + enabled: true + name: query-auth + cookie: + enabled: true + name: cookie-auth +``` + +## Dynamic mTLS with Auth Token +The Auth Token method can support [Dynamic mTLS](/basic-config-and-security/security/mutual-tls/client-mtls#dynamic-mtls) where the client can provide a TLS certificate in lieu of a standard Auth Token. 
This can be configured for an API using the [enableClientCertificate](/api-management/gateway-config-tyk-oas#token) option (Tyk Classic: `auth.use_certificate`). + +## Auth Token with Signature + +If you are migrating from platforms like Mashery, which use request signing, you can enable signature validation alongside auth token by configuring the additional [signatureValidation](/api-management/gateway-config-tyk-oas#token) field (Tyk Classic: `auth.signature`). + +You can configure: + +- the location of the signature +- the algorithm used to create the signature (`MasherySHA256` or `MasheryMD5`) +- secret used during signature +- an allowable clock skew + +## Using Custom Auth Tokens + +If you have your own identity provider you may want to use that to generate and manage the access tokens, rather than having Tyk generate the tokens. You can use the `POST /tyk/keys/{keyID}` endpoint in the [Tyk Gateway API](/tyk-gateway-api) to import those tokens to Tyk, off-loading access control, quotas and rate limiting from your own application. 
+ +## Using Tyk Dashboard to Configure Auth Token + +Using the Tyk Dashboard, you can configure the Auth Token authentication method from the Server section in the API Designer by enabling **Authentication** and selecting **Auth Token** from the drop-down: + +Configuring the Auth Token method + +- select the location(s) where Tyk should look for the token +- provide the key name for each location (we prefill the default `Authorization` for the *header* location, but you can replace this if required) +- select **Strip authorization data** to remove the auth token locations from the request prior to proxying to the upstream, as described [here](/api-management/client-authentication#managing-authorization-data) +- optionally select **Enable client certificate** to enable [Dynamic mTLS](/basic-config-and-security/security/mutual-tls/client-mtls#dynamic-mtls) for the API, so the client can provide a certificate in place of the token + +Note that the [auth token + signature](/api-management/authentication/bearer-token#auth-token-with-signature) option is not available in the Tyk Dashboard API Designer. + + +## Using Tyk Classic APIs + +As noted in the Tyk Classic API [documentation](/api-management/gateway-config-tyk-classic#configuring-authentication-for-tyk-classic-apis), a new Tyk Classic API will use the auth (bearer) token method by default with the token expected in the `Authorization` header, so configuration is slightly different as there is no need to `enable` this method. You should configure the `auth` object for any non-default settings, such as a different token location or Dynamic mTLS. 
+ + + diff --git a/api-management/authentication/custom-auth.mdx b/api-management/authentication/custom-auth.mdx new file mode 100644 index 000000000..0d2055fcd --- /dev/null +++ b/api-management/authentication/custom-auth.mdx @@ -0,0 +1,19 @@ +--- +title: "Custom Authentication" +description: "How to implement custom authentication in Tyk using Go plugins, Python CoProcess, and JSVM plugins." +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Go Plugins, Python CoProcess, JSVM Plugin" +sidebarTitle: "Custom Authentication" +--- + +## Go Plugins + +Go Plugin Authentication allows you to implement custom authentication logic using the Go programming language. This method is useful for scenarios where you need to implement specialized authentication mechanisms that are not natively supported by Tyk. +To learn more about using Tyk Golang Plugins, go [here](/api-management/plugins/golang) + +## Use Python CoProcess and JSVM Plugin Authentication + +Tyk allows for custom authentication logic using Python and JavaScript Virtual Machine (JSVM) plugins. This method is useful for implementing unique authentication mechanisms that are tailored to your specific requirements. + +* See [Custom Authentication with a Python plugin](/api-management/plugins/rich-plugins#custom-authentication-plugin-tutorial) for a detailed example of a custom Python plugin. +* See [JavaScript Middleware](/api-management/plugins/javascript#) for more details on using JavaScript Middleware. + diff --git a/api-management/authentication/jwt-authorization.mdx b/api-management/authentication/jwt-authorization.mdx new file mode 100644 index 000000000..204ebbd09 --- /dev/null +++ b/api-management/authentication/jwt-authorization.mdx @@ -0,0 +1,340 @@ +--- +title: "JWT Authorization" +description: "How JWT authorization works in Tyk API Gateway." 
+keywords: "Authentication, Authorization, JWT, JSON Web Tokens, Claims, Validation" +sidebarTitle: "Authorization" +--- + +## Availability + +| Component | Editions | +| :------------- | :------------------------- | +| Tyk Gateway | Community and Enterprise | + +## Introduction + +[JSON Web Tokens (JWT)](https://www.jwt.io/introduction) are a popular method for client authentication and authorization that can be used to secure access to your APIs via Tyk's [JWT Auth](/basic-config-and-security/security/authentication-authorization/json-web-tokens) method. + +After the JWT signature has been [validated](/basic-config-and-security/security/authentication-authorization/json-web-tokens#signature-validation), Tyk uses the **claims** within the token to determine which security policies (access rights, rate limits and quotas) should be applied to the request. + +From Tyk 5.10, Tyk can perform optional [validation](/api-management/authentication/jwt-claim-validation) of these claims. + +In this page, we explain how Tyk performs JWT authorization, including how it identifies the user and the policies to be applied. + +## JWT Authorization Flow + +When a request with a JWT arrives at Tyk Gateway, after the authentication (signature and claim validation) step, Tyk performs the following steps to authorize the request: + +1. **Identity Extraction**: The user identity is extracted from the token according to this order of precedence: + - The `kid` header (unless `skipKid` is enabled) + - A custom claim (specified in `subjectClaims`) + - The standard `sub` claim (fallback) + +2. **Policy Resolution**: Tyk determines which [policy](/api-management/policies#what-is-a-security-policy) to apply to the request: + - From scope-to-policy mapping + - From default policies + +3. **Update Session**: The [session](/api-management/policies#what-is-a-session-object) is updated with the identity and policies. 
+ +In the following sections, we provide a detailed explanation of each of these steps. + +## Identifying the Session Owner + +A unique identity is stored in the session object to associate it with the authenticated user. This identifier is extracted from the JWT by checking the following fields in order of precedence: + +1. The standard Key ID header (`kid`) in the JWT (unless the `skipKid` option is enabled) +2. The subject identity claim identified by the value(s) stored in `subjectClaims` (which allows API administrators to designate any JWT claim as the identity source, e.g., `user_id`, `email`, etc.). + + When multiple values are provided in the `subjectClaims` array, Tyk processes them as follows: + + 1. Tyk tries each claim **in the exact order they appear** in the array + 2. For each claim, Tyk checks if: + - The claim exists in the token + - The claim value is a string and is not empty + 3. Tyk uses the **first valid, non-empty value** it finds and stops processing further claims + 4. If none of the claims yield a valid identity, Tyk proceeds to the next stage (the `sub` claim) + + + + + Prior to Tyk 5.10, the subject identity claim was retrieved from `identityBaseField`; see [using multiple identity providers](#using-multiple-identity-providers) for details and for the Tyk Classic API alternative. + + + +3. The `sub` [registered claim](/api-management/authentication/jwt-claim-validation#registered-vs-custom-claims). + +**Example** + +In this example, `skipKid` has been set to `true`, so Tyk checks the `subjectClaims` and determines that the value in the custom claim `user_id` within the JWT should be used as the identity for the session object. + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + skipKid: true + subjectClaims: [user_id] +``` + + + +Session objects can be cached to improve performance, so the identity extraction is only performed on the first request with a JWT, or when the cache is refreshed.
+ + + +## Identifying the Tyk Policies to be applied + +[Security Policies](/api-management/policies) are applied (or mapped) to the session object to configure authorization for the request. Policies must be [registered](/api-management/policies#how-you-can-create-policies) with Tyk, such that they have been allocated a unique *Tyk Policy Id*. + +Tyk supports three different types of policy mapping, which are applied in this priority order: + +1. Direct policy mapping +2. Scope policy mapping +3. Default policy mapping + +### Direct policies + +You can optionally specify policies to be applied to the session via the *policy claim* in the JWT. This is a [Private](https://datatracker.ietf.org/doc/html/rfc7519#section-4.3) Claim (not a registered claim) and can be anything you want, but typically we recommend the use of `pol`. You must instruct Tyk where to look for the policy claim by configuring the `basePolicyClaims` field in the API definition. + +Note that we typically refer to Private Claims as Custom Claims. + +In this example, Tyk has been configured to check the `pol` claim in the JWT to find the *Policy Ids* for the policies to be applied to the session object: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + basePolicyClaims: [pol] +``` + +In the JWT, you should then provide the list of Tyk policy IDs as an array of values in that claim, for example you might declare: + +``` + "pol": ["685a8af28c24bdac0dc21c28", "685bd90b8c24bd4b6d79443d"] +``` + + + +Prior to Tyk 5.10, the base policy claim was retrieved from `policyFieldName`; see [using multiple identity providers](#using-multiple-identity-providers) for details and for the Tyk Classic API alternative. + + + +### Default policies + +You **must** configure one or more *default policies* that will be applied if no specific policies are identified from the JWT claims. 
These are configured using the `defaultPolicies` field in the API definition, which accepts a list of policy IDs. This prevents a session from being created with no authorization to interact with APIs on the Gateway. + + + +The Gateway will return `HTTP 403 Forbidden` if no default policies are configured, if the referenced policies don't exist, or if policies are invalid or incorrectly formatted. + + + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + defaultPolicies: + - 685a8af28c24bdac0dc21c28 + - 685bd90b8c24bd4b6d79443d +``` + +### Scope policies + +Directly mapping policies to APIs relies on the sharing of Tyk Policy IDs with the IdP (so that they can be included in the JWT) and may not provide the required flexibility. + +Tyk supports a more advanced approach where policies are applied based on scopes declared in the JWT. This keeps separation between the IdP and Tyk-specific concepts, and supports much more flexible configuration. + +Within the JWT, you identify a Private Claim that will hold the authorization (or access) scopes for the API. You then provide, within that claim, a list of *scopes*. In your API definition, you configure the `scopes.claims` to instruct Tyk where to look for the scopes and then you declare a mapping of scopes to policies within the `scopes.scopeToPolicyMapping` object. + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + scopes: + scopeToPolicyMapping: + - scope: read:users + policyId: 685bd90b8c24bd4b6d79443d + - scope: write:users + policyId: 685a8af28c24bdac0dc21c28 + claims: [accessScopes] +``` + +In this example, Tyk will check the `accessScopes` claim within the incoming JWT and apply the appropriate policy if that claim contains the value `read:users` or `write:users`. If neither scope is declared in the claim, or the claim is missing, the default policy will be applied.
+ + + +Prior to Tyk 5.10, the authorization scopes claim was retrieved from `scopes.claimName`; see [using multiple identity providers](#using-multiple-identity-providers) for details and for the Tyk Classic API alternative. + + + +#### Declaring Multiple Scopes + +You can declare multiple scopes by setting the value of the **authorization scopes claim** in one of the following ways: + +* **String with space-delimited list of values (standard format)** + + ```json + "accessScopes": "read:users write:users" + ``` + +* **Array of strings** + + ```json + "accessScopes": ["read:users", "write:users"] + ``` + +* **String with space-delimited list inside a nested key** + + ```json + "accessScopes": { "access": "read:users write:users" } + ``` + +* **Array of strings inside a nested key** + + ```json + "accessScopes": { "access": ["read:users", "write:users"] } + ``` + +**Important:** + +* If your scopes are defined inside a nested key, use **dot notation** for the `scopes.claims` value. + + * For **examples 1 and 2**, set `scopes.claims` to: + + ``` + accessScopes + ``` + * For **examples 3 and 4**, set `scopes.claims` to: + + ``` + accessScopes.access + ``` + +**Example JWT fragment:** +If this JWT is provided to an API configured as described above, Tyk will apply both policies to the session object. + +```json +{ + "sub": "1234567890", + "name": "Alice Smith", + "accessScopes": ["read:users", "write:users"] +} +``` + +### Combining policies + +Where multiple policies are mapped to a session (for example, if several scopes are declared in the JWT claim, or if you set multiple *default policies*), Tyk will apply all the matching policies to the request, combining their access rights and using the most permissive rate limits and quotas. It's important when creating those policies to ensure that they do not conflict with each other. + +Policies are combined as follows: + +1. Apply direct-mapped policies declared via `basePolicyClaims` +2. 
Apply scope-mapped policies declared in `scopeToPolicyMapping` based upon scopes in the JWT +3. If no policies have been applied in steps 1 or 2, apply the default policies from `defaultPolicies` + +When multiple policies are combined, the following logic is applied: + +- **access rights** A user gets access to an endpoint if ANY of the applied policies grant access +- **consumption limits** Tyk uses the most permissive values (highest quota, highest throughput ) +- **other settings** The most permissive settings from any policy are applied + +### Policy Best Practices + +When creating multiple policies that might be applied to the same JWT, we recommend using [partitioned policies](/api-management/policies#partitioned-policies) - policies that control specific aspects of API access rather than trying to configure everything in a single policy. + +For example: + +- Create one policy that grants read-only access to specific endpoints +- Create another policy that grants write access to different endpoints +- Create a third policy that sets specific rate limits + +To ensure these policies work correctly when combined: + +- Set `per_api` to `true` in each policy. This ensures that the policy's settings only apply to the specific APIs listed in that policy, not to all APIs globally. +- Avoid listing the same `API ID` in multiple policies with conflicting settings. Instead, create distinct policies with complementary settings that can be safely combined. + + +## Session Updates + +After authenticating the token and extracting the necessary identity and policy information, Tyk creates or updates a session object that controls access to the API. + +The following [session attributes](/api-management/policies#session-object) are modified based on the policies: + +1. **Access Rights**: Determines which API endpoints the token can access +2. **Rate Limits**: Controls how many requests per second/minute the token can make +3. 
**Quotas**: Sets the maximum number of requests allowed in a time period +4. **Metadata**: Custom metadata from the policies is added to the session +5. **Tags**: Policy tags are added to the session + +In addition to updating the session, Tyk extracts claims from the JWT and makes them available as context variables for use in other [middleware](/api-management/traffic-transformation). + + + +When a JWT's claims change (for example, by configuring different scopes or policies), Tyk updates the session with the new policies on the subsequent request made with the token. + + + +## Advanced Configuration + +### Using Multiple Identity Providers + +When using multiple Identity Providers (IdPs), you may need to check different claim locations for the same information. Tyk supports definition of **multiple claim locations** for subject identity and policy IDs. + +* **Before Tyk 5.10 (and for Tyk Classic APIs):** + + * The Gateway could only check **single claims** for: + + * Subject identity + * Base policy + * Scope-to-policy mapping + * This setup didn’t support multiple IdPs using different claim names (e.g **Keycloak** uses `scope` and **Okta** uses `scp`) + +* **From Tyk 5.10 onwards (Tyk OAS APIs):** + + * You can configure **multiple claim names** for: + + * Subject identity + * Base policy + * Scope-to-policy mapping + * This allows Tyk to locate data across various tokens and IdPs more flexibly. 
+ +**Configuration summary:** + +| API Configuration Type | Tyk Version | Subject Identity Locator | Base Policy Locator | Scope-to-Policy Mapping Locator | +| :---------------------- | :----------- | :------------------------- | :----------------------- | :------------------------------- | +| Tyk OAS | pre-5.10 | `identityBaseField` | `policyFieldName` | `scopes.claimName` | +| Tyk OAS | 5.10+ | `subjectClaims` | `basePolicyClaims` | `scopes.claims` | +| Tyk Classic | all | `jwt_identity_base_field` | `jwt_policy_field_name` | `jwt_scope_claim_name` | + +**Example configuration:** + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + # Legacy single field (still supported) + identityBaseField: "sub" + + # New multi-location support (Tyk 5.10+) + subjectClaims: + - "sub" + - "username" + - "user_id" +``` + +#### Backward Compatibility + +The new configuration is fully backward compatible: + +- Existing `identityBaseField`, `policyFieldName`, and `scopes.claimName` settings continue to work +- If both old and new fields are specified, the new fields take precedence +- When using only new fields, the first element in each array is used to set the corresponding legacy field for backward compatibility + diff --git a/api-management/authentication/jwt-claim-validation.mdx b/api-management/authentication/jwt-claim-validation.mdx new file mode 100644 index 000000000..01b72cd22 --- /dev/null +++ b/api-management/authentication/jwt-claim-validation.mdx @@ -0,0 +1,799 @@ +--- +title: "JWT Claim Validation" +description: "How to validate JWT claims in Tyk API Gateway." +keywords: "Authentication, JWT, JSON Web Tokens, Claims, Validation" +sidebarTitle: "Claim Validation" +--- + +## Availability + +| Component | Editions | +| :------------- | :------------------------- | +| Tyk Gateway | Community and Enterprise | + +## Introduction + +A JSON Web Token consists of three parts separated by dots: `header.payload.signature`. 
The payload contains the claims, a set of key-value pairs that carry information about the token and its subject. + +Tyk can validate these claims to ensure that incoming JWTs meet your security requirements before granting access to your APIs. + +By validating JWT claims, you can enforce fine-grained access control policies, ensure tokens originate from trusted sources, and verify that users have the appropriate permissions for your APIs. + + + +**Viewing JWT Claims** + +To inspect the claims in a JWT, use online tools like [jwt.io](https://jwt.io) for quick debugging + + + +{/* ## Quick Start */} + +## JWT Claims Fundamentals + +### Registered vs Custom Claims + +JWT claims can be categorized into two types: + +- **Registered Claims**: + + Registered Claims are standardized by the JWT specification ([RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519#section-4.1)) and have predefined meanings. + + These claims are further grouped into: + + - **Temporal Claims:** time-based validation + - **Identity Claims:** content-based validation + + | Claim | Name | Purpose | Type | + | ----- | --------------- | ---------------------------------------- | -------- | + | `iss` | Issuer | Identifies who issued the token | Identity | + | `aud` | Audience | Identifies who the token is intended for | Identity | + | `sub` | Subject | Identifies the subject of the token | Identity | + | `exp` | Expiration Time | When the token expires | Temporal | + | `iat` | Issued At | When the token was issued | Temporal | + | `nbf` | Not Before | When the token becomes valid | Temporal | + | `jti` | JWT ID | Unique identifier for the token | Identity | + +- **Custom Claims**: + + Custom Claims, referred to as Private Claims in the [JWT Specification](https://datatracker.ietf.org/doc/html/rfc7519#section-4.3), are application-specific and can contain any information relevant to your use case, such as user roles, permissions, department, or metadata. 
+ +**Example JWT Payload with Both Registered and Custom Claims**: + +```json +{ + // Registered claims + "iss": "https://auth.company.com", + "aud": "api.company.com", + "sub": "user123", + "exp": 1735689600, + "iat": 1735603200, + + // Custom claims + "department": "engineering", + "role": "admin" +} +``` + +### Supported Claims and API Types + +| Claim Category | Sub-Category | Tyk OAS APIs | Tyk Classic APIs | Version | +| :--------------------- | :----------------------------------------- | :------------------------- | :----------------------------- | :------------------ | +| **Registered Claims** | **Temporal** (`exp`, `iat`, `nbf`) | ✅ Yes | ✅ Yes | All versions | +| **Registered Claims** | **Identity** (`iss`, `aud`, `sub`, `jti`) | ✅ Yes | ❌ No | 5.10+ | +| **Custom Claims** | — | ✅ Yes | ❌ No | 5.10+ | + +### How Tyk Processes JWT Claims + +After [verifying](/basic-config-and-security/security/authentication-authorization/json-web-tokens#signature-validation) that the token hasn't been tampered with, Tyk processes claims in this order: + +1. **Claims Extraction**: All claims from the JWT payload are extracted and stored in [context variables](/api-management/traffic-transformation/request-context-variables) with the format `jwt_claims_CLAIMNAME`. For example, a claim named `role` becomes accessible as `jwt_claims_role`. + +2. **Claims Validation**: + - [Registered Claims Validation](#registered-claims-validation): Checks standard claims against your configuration + - [Custom Claims Validation](#custom-claims-validation): Applies your business rules to custom claims + - [Authorization](/api-management/authentication/jwt-authorization): Uses validated claims to determine API access and apply policies + +If any validation step fails, Tyk rejects the request with a specific error message indicating which claim validation failed and why.
+ +## Registered Claims Validation + +[Registered Claims](#registered-vs-custom-claims) are grouped into: +- **Temporal claims** (time-based validation): Supported in both Tyk Classic APIs and OAS APIs +- **Identity claims** (content-based validation): Available only in Tyk OAS APIs + +### Temporal Claims + +Temporal claims define the validity period of a JWT. Tyk automatically validates these claims when present in the token. + +- **Expiration Time (exp)**: the `exp` claim specifies when the token expires (as a Unix timestamp). Tyk rejects tokens where the current time is after the expiration time. +- **Issued At (iat)**: the `iat` claim specifies when the token was issued. Tyk rejects tokens that claim to be issued in the future. +- **Not Before (nbf)**: the `nbf` claim specifies the earliest time the token can be used. Tyk rejects tokens before this time. + +#### Clock Skew Configuration + +Due to the nature of distributed systems, you may encounter clock skew between your Identity Provider and Tyk servers. You can configure tolerance for timing differences: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + issuedAtValidationSkew: 5 # Allow tokens issued up to 5 seconds in the future + notBeforeValidationSkew: 2 # Allow tokens to be valid 2 seconds early + expiresAtValidationSkew: 2 # Allow tokens to be valid 2 seconds past expiration +``` + +- `expiresAtValidationSkew` allows recently expired tokens to be considered valid +- `issuedAtValidationSkew` allows tokens claiming future issuance to be valid +- `notBeforeValidationSkew` allows tokens to be valid before their `nbf` time + + + + + Temporal claim validation and the associated clock skew controls were supported by Tyk before 5.10.0 and also for [Tyk Classic APIs](/api-management/gateway-config-tyk-classic#configuring-authentication-for-tyk-classic-apis) + + + +### Identity Claims + +Identity claims provide information about the token's origin and intended use. 
Unlike temporal claims, these require explicit configuration to enable validation. + +#### Issuer Validation (iss) + +Validates that a trusted Identity Provider issued the token: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + allowedIssuers: + - "https://auth.company.com" + - "https://auth.partner.com" +``` + +Tyk accepts tokens if the `iss` claim matches any configured issuer. If `allowedIssuers` is empty, no issuer validation is performed. + +#### Audience Validation (aud) + +Validates that the token is intended for your API: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + allowedAudiences: + - "api.company.com" + - "mobile-app" +``` + +The `aud` claim can be a string or an array. Tyk accepts tokens if any audience value matches any configured audience. If `allowedAudiences` is empty, no audience validation is performed. + +#### Subject Validation (sub) + +Validates the token subject against allowed values: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + allowedSubjects: + - "user" + - "service-account" + - "admin" +``` + +Useful for restricting API access to specific types of subjects or known entities. If `allowedSubjects` is empty, no subject validation is performed. + +#### JWT ID Validation (jti) + +Validates that the token contains a unique identifier: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + jtiValidation: + enabled: true +``` + +When enabled, Tyk requires the `jti` claim to be present. This is useful for token tracking and revocation scenarios. Note that Tyk does not perform any validation on the content of the claim, only that it is present. 
+ +### Configuration Examples + +Basic registered claims validation: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + allowedIssuers: ["https://auth.company.com"] + allowedAudiences: ["api.company.com"] + jtiValidation: + enabled: true + expiresAtValidationSkew: 5 +``` + +Multi-IdP configuration: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + allowedIssuers: + - "https://auth0.company.com" + - "https://keycloak.company.com" + allowedAudiences: + - "api.company.com" + - "mobile.company.com" + subjectClaims: ["sub", "username"] +``` + +In this example, we expect one Identity Provider to present the subject in the `sub` claim, and the other to present it in the `username` claim. + +## Custom Claims Validation + +Custom claims validation allows you to enforce business-specific rules on JWT tokens beyond the standard registered claims. + +**Use Cases**: + +- **Role-based access control**: Validate that users have required roles (for example, `admin`, `editor`, `viewer`) +- **Department restrictions**: Ensure users belong to authorized departments +- **Feature flags**: Check if users have access to specific features or API endpoints +- **Geographic restrictions**: Validate user location or region-based access +- **Subscription tiers**: Enforce access based on user subscription levels + +### Validation Types + +The custom claims validation supports three distinct validation types. These validation types can be applied to any custom claim in your JWT tokens, providing flexible control over your authorization logic. + +#### Required + +Required type validation ensures that a specific claim exists in the JWT token, regardless of its value. 
+ +**Use Cases:** + +- Ensuring user metadata is present (even if empty) +- Validating that required organizational fields exist +- Confirming compliance with token structure requirements + +**Behavior:** + +- βœ… **Passes** if the claim exists with any non-null value (including empty strings, arrays, or objects) +- ❌ **Fails** if the claim is missing or explicitly set to `null` + +**Example Configuration:** + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + department: + type: required + user_metadata: + type: required +``` + +#### Exact Match + +Exact match type validation verifies that a claim's value exactly matches one of the specified allowed values. + +**Use Cases:** + +- Role validation (e.g., `admin`, `editor`, `viewer`) +- Environment-specific access (e.g., `production`, `staging`, `development`) +- Subscription tier validation (e.g., `premium`, `standard`, `basic`) +- Boolean flag validation (`true`, `false`) + +**Behavior:** + +- βœ… Passes if the claim value exactly matches any value in the allowedValues array +- ❌ Fails if the claim value doesn't match any allowed value, if the claim is missing, or if allowedValues is empty +- Case-sensitive for string comparisons +- Type-sensitive (string "true" β‰  boolean true) + +**Example Configuration:** + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + role: + type: exact_match + allowedValues: + - admin + - editor + - viewer + subscription_tier: + type: exact_match + allowedValues: + - premium + - standard +``` + +#### Contains + +The Contains type validation checks whether a claim's value contains or includes one of the specified values. This validation type works differently depending on the data type of the claim and is particularly useful for array-based permissions and substring matching. 
+ +**Use Cases:** + +- Permission arrays (`["read:users", "write:posts", "admin:system"]`) +- Tag-based access control +- Partial string matching for departments or locations +- Multi-value scope validation + +**Behavior by Data Type:** + +Arrays: +- ✅ Passes if the array contains any of the specified values +- ❌ Fails if none of the specified values are found in the array + +Strings: +- ✅ Passes if the string contains any of the specified substrings +- ❌ Fails if none of the specified substrings are found + +Other Types: +- Converts to a string and performs substring matching + +Example Configuration: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + permissions: + type: contains + allowedValues: + - admin:system + - write:api + department_code: + type: contains + allowedValues: + - ENG + - SALES +``` + +With this configuration, a token might contain these claims: + +```json +{ + "permissions": ["read:users", "write:posts", "admin:system"], + "department_code": "ENG-BACKEND" +} +``` + +In this example: +- `permissions` validation passes because the array contains `"admin:system"` +- `department_code` validation passes because the string contains `"ENG"` + +### Data Type Support + +The framework is designed to handle the diverse data types commonly found in JWT tokens. The validation behavior adapts intelligently based on the actual data type of each claim, ensuring robust and predictable validation across different token structures. + +#### Supported Data Types + +##### String Values + +String claims are the most common type in JWT tokens and support all three validation types with intuitive behavior.
+ +**Validation behavior** + +- **Required**: Passes if the string exists (including empty strings `""`) +- **Exact Match**: Performs case-sensitive string comparison +- **Contains**: Checks if the string contains any of the specified substrings + +**Example** + +Claims: + +```json +{ + "department": "Engineering", + "user_id": "user123", + "email": "john.doe@company.com" +} +``` + +Validation configuration: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + department: + type: exact_match + allowedValues: + - Engineering + - Sales + - Marketing + email: + type: contains + allowedValues: + - "@company.com" + - "@partner.com" +``` + +##### Numeric Values + +Numeric claims (integers and floating-point numbers) are validated with type-aware comparison logic. + +**Validation behavior** + +- **Required**: Passes if the number exists (including `0`) +- **Exact Match**: Performs numeric equality comparison (`42` matches `42.0`) +- **Contains**: Converts to a string and performs substring matching + +**Example** + +Claims: + +```json +{ + "user_level": 5, + "account_balance": 1250.75, + "login_count": 0 +} +``` + +Validation configuration: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + user_level: + type: exact_match + allowedValues: + - 1 + - 2 + - 3 + - 4 + - 5 + account_balance: + type: required +``` + +##### Boolean Values + +Boolean claims are commonly used for feature flags and permission toggles. 
+ +**Validation Behavior** + +- **Required**: Passes if the boolean exists (`true` or `false`) +- **Exact Match**: Performs strict boolean comparison +- **Contains**: Converts to string (`"true"` or `"false"`) and performs substring matching + +**Example** + +Claims: + +```json +{ + "is_admin": true, + "email_verified": false, + "beta_features": true +} +``` + +Validation configuration: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + is_admin: + type: exact_match + allowedValues: + - true + email_verified: + type: required +``` + +##### Array Values + +Arrays are particularly powerful for permission systems and multi-value attributes. + +**Validation behavior** + +- **Required**: Passes if the array exists (including empty arrays `[]`) +- **Exact Match**: Checks if the entire array exactly matches one of the allowed arrays +- **Contains**: Checks if the array contains any of the specified values (most common use case) + +**Example** + +Claims: + +```json +{ + "roles": ["user", "editor"], + "permissions": ["read:posts", "write:posts", "delete:own"], + "departments": ["engineering", "product"], + "tags": [] +} +``` + +Validation configuration: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + permissions: + type: contains + allowedValues: + - write:posts + - admin:system + roles: + type: contains + allowedValues: + - admin + - editor + - moderator + tags: + type: required +``` + +##### Object Values + +Complex object claims can be validated, though typically you'll want to validate specific nested properties using [dot notation](#nested-claims).
+
+**Validation behavior**
+
+- **Required**: Passes if the object exists (including empty objects `{}`)
+- **Exact Match**: Performs deep object comparison (rarely used)
+- **Contains**: Converts to a JSON string and performs substring matching
+
+**Example**
+
+Claims:
+
+```json
+{
+  "user_metadata": {
+    "department": "Engineering",
+    "level": 5,
+    "location": "US"
+  },
+  "preferences": {}
+}
+```
+
+Validation configuration:
+
+```yaml
+x-tyk-api-gateway:
+  server:
+    authentication:
+      securitySchemes:
+        jwtAuth:
+          customClaimValidation:
+            user_metadata:
+              type: required
+            preferences:
+              type: required
+```
+
+##### Type Coercion and Edge Cases
+
+**Null and Undefined Values**
+
+- null values: Always fail validation (treated as missing)
+- undefined/missing claims: Fail all validation types except when validation is not configured
+
+**Mixed-Type Arrays**
+
+Arrays containing different data types are supported. The `contains` validation will attempt to match values using appropriate type comparison, for example:
+
+```json
+{
+  "mixed_permissions": ["read", 42, true, "admin"]
+}
+```
+
+**Type Mismatches**
+
+When the expected value type doesn't match the claim type, Tyk performs intelligent conversion:
+
+- Numbers to strings: `42` becomes `"42"`
+- Booleans to strings: `true` becomes `"true"`
+- Objects/arrays to strings: Converted to JSON representation
+
+##### Best Practices
+
+- Be Explicit About Types: When configuring `allowedValues`, use the same data type as expected in the token
+- Use Arrays for Multi-Value Validation: Prefer array-based claims for permissions and roles
+- Consider Empty Values: Remember that empty strings, arrays, and objects pass `required` validation
+- Test Type Coercion: Verify behavior when token types don't match expected types
+
+### Nested Claims
+
+JSON Web Tokens often contain complex, hierarchical data structures with nested objects and arrays. 
Tyk's custom claims validation framework supports validating nested claim structures using dot notation syntax. + +**Basic Syntax:** + +- `user.name` - Access the `name` property within the `user` object +- `permissions.0.resource` - Access the `resource` property of the first element in the `permissions` array + + + + + **Dot Notation** + + Tyk uses [gjson](https://github.com/tidwall/gjson) to parse dot notation paths. + + + +#### Nested Object Validation + +The most common use case for dot notation is validating properties within nested objects, such as user metadata, organizational information, or configuration settings. + +**Example Token** + +```json +{ + "user": { + "name": "John Doe", + "email": "john.doe@company.com", + "profile": { + "department": "Engineering", + "level": "senior", + "location": { + "country": "US", + "region": "West" + } + } + } +} +``` + +You could set the following configuration to validate the requester's department and level: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + user.profile.department: + type: exact_match + allowedValues: + - Engineering + - Sales + - Marketing + user.profile.level: + type: contains + allowedValues: + - senior + - lead + - principal +``` + + +#### Nested Array Validation + +Arrays are commonly used in JWT claims to represent lists of permissions, roles, or other multi-value attributes. Tyk supports validating specific elements within arrays using dot notation with numeric indices. 
+
+**Example Token**
+
+```json
+{
+  "permissions": [
+    {
+      "resource": "users",
+      "actions": ["read", "write"]
+    },
+    {
+      "resource": "reports",
+      "actions": ["read"]
+    }
+  ]
+}
+```
+
+You can validate specific array elements:
+
+```yaml
+x-tyk-api-gateway:
+  server:
+    authentication:
+      securitySchemes:
+        jwtAuth:
+          customClaimValidation:
+            "permissions.0.resource":
+              type: exact_match
+              allowedValues: ["users"]
+            "permissions.1.actions.0":
+              type: exact_match
+              allowedValues: ["read"]
+```
+
+
+
+**Missing or invalid paths**
+
+When a nested path doesn't exist (e.g., `user.profile.level` but `profile` doesn't exist) or when an array index is out of bounds (e.g., `permissions.999.resource`), the claim is treated as missing. This will cause validation to fail for blocking rules or generate a warning for non-blocking rules.
+
+
+
+#### Recommendations
+
+Test your nested claim validation rules with representative JWT tokens to ensure they behave as expected. Use online tools like [gjson.dev](https://gjson.dev/) to experiment with dot notation paths and verify they correctly access the desired values.
+
+### Non-blocking Validation
+
+Non-blocking validation allows JWT claims to fail validation with a warning logged, while still permitting the request to proceed.
+
+This behavior allows you to:
+
+- Monitor how new validation rules would affect traffic without disrupting users
+- Gradually roll out stricter validation requirements
+- Debug validation issues in production environments
+
+#### How Non-blocking Validation Works
+
+When configured, a validation rule can be set to "non-blocking" mode, which means:
+
+1. If validation passes, the request proceeds normally
+2. 
If validation fails, instead of rejecting the request: + - A warning is logged to the Tyk Gateway log file at the `WARN` log level + - The validation process continues to evaluate other custom claims + - The request is allowed to proceed to the upstream API + +#### Configuring Non-Blocking Mode + +Non-blocking mode can be configured for any custom claim validation rule with the addition of the boolean `nonBlocking` flag, for example: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + customClaimValidation: + user.profile.department: + type: exact_match + allowedValues: + - Engineering + - Sales + - Marketing + user.preferences.notifications: + type: required + nonBlocking: true +``` + +The `nonBlocking` flag in the validation rule for `user.preferences.notifications` means that if this claim is missing from the received token, the token will not fail validation, but a warning will be logged. \ No newline at end of file diff --git a/api-management/authentication/oauth-2.mdx b/api-management/authentication/oauth-2.mdx new file mode 100644 index 000000000..1a6b8b56a --- /dev/null +++ b/api-management/authentication/oauth-2.mdx @@ -0,0 +1,639 @@ +--- +title: "OAuth 2.0" +description: "How to configure OAuth 2.0 in Tyk?" +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Secure APIs, OAuth 2.0" +sidebarTitle: "OAuth 2.0" +--- + +## Use Tyk as an OAuth 2.0 Authorization Server + +Tyk can act as an OAuth 2.0 *authorization server*, performing token generation and management for *clients* accessing APIs deployed on Tyk. There are many great resources on the Internet that will help you to understand the OAuth 2.0 Authorization Framework, which we won't attempt to duplicate here. We will provide a basic introduction to the [concepts and terminology](#oauth-20-core-concepts) before we dive into the details of using Tyk as your *auth server*. 
+
+Tyk offers some great features when used as the *authorization server* including:
+
+- **Fine-Grained Access Control:** Manage access using Tyk's built-in access controls, including versioning and named API IDs
+- **Usage Analytics:** Leverage Tyk's analytics capabilities to monitor OAuth 2.0 usage effectively, grouping data by Client Id
+- **Multi-API Access**: Enable access to multiple APIs using a single OAuth token; configure one API for OAuth 2.0 token issuance and the other APIs with the [Auth Token](/api-management/authentication/bearer-token) method, linking them through a common policy
+
+*Tyk as OAuth authorization server* supports the following *grant types*:
+
+- [Authorization Code Grant](#using-the-authorization-code-grant): the *client* is redirected to an *identity server* where the *user* must approve access before an *access token* will be issued
+- [Client Credentials Grant](#using-the-client-credentials-grant): used for machine-to-machine access, authentication is performed using only the *client Id* and *client secret*
+- [Resource Owner Password Grant](#using-the-resource-owner-password-grant) (a.k.a. Password Grant): only for use where the *client* is highly trusted, as the *client* must provide the *Resource Owner*'s own credentials during authentication
+
+
+
+
+    **Tyk does not recommend the use of Resource Owner Password Grant**. This method is considered unsafe and is prohibited in the [OAuth 2.0 Security Best Practice](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics-13#section-3.4) but is supported for use with legacy clients. 
+ + + +To make use of this, you'll need to: + +- understand how to integrate your *client* (and, for Authorization Code grant, your *identity server*) according to the OAuth grant type +- [register a client app](#client-app-registration) for each client that needs to access the API +- [configure your API proxy](#configuring-your-api-proxy) to use the *Tyk OAuth 2.0* authentication method + +{/* TODO: This video probably needs to be re-recorded with Tyk OAS, so not publishing for now: */} + + + +## OAuth 2.0 Core Concepts + +**OAuth 2.0** (Open Authorization 2.0) is a widely adopted authorization protocol that allows third-party applications to access user resources securely, without needing to expose sensitive credentials such as user passwords. It is an industry-standard framework that enables a delegated approach to securing access to APIs and services. The [IETF OAuth 2.0 specification](https://datatracker.ietf.org/doc/html/rfc6749) outlines the standard for OAuth 2.0. + +> "The OAuth 2.0 authorization framework enables a third-party application to obtain limited access to an HTTP service, either on behalf of a resource owner by orchestrating an approval interaction between the resource owner and the HTTP service, or by allowing the third-party application to obtain access on its own behalf." β€” [RFC 6749](https://datatracker.ietf.org/doc/html/rfc6749) + +OAuth 2.0 provides a mechanism for **client applications** to request limited access to resources hosted by a **resource server**, on behalf of a **resource owner** (typically a user), without exposing the resource owner's credentials. This allows secure sharing of data between applicationsβ€”for example, allowing a calendar app to access a user's contacts to automatically find available time slots for meetings. 
+ +OAuth 2.0 has many variations and flows suited for different use cases, this section will provide an overview of the core principles, terminology, and key concepts, specifically focusing on how you can implement OAuth 2.0 with Tyk. + +### Terminology + +- **Protected Resource**: The service or data that is protected by OAuth (e.g. an API endpoint) and requires authorization to access. +- **Resource Owner**: The **user** or system that owns the *Protected Resource* and has the ability to grant or deny access to it. +- **Client**: The application or system that seeks access to the *Protected Resource*. It acts on behalf of the *Resource Owner*. +- **Access Token**: A short-lived piece of data that grants the *Client* access to the *Protected Resource*. The token proves that the *Client* has been authorized by the *Resource Owner*. +- **Authorization Server**: The server that issues *Access Tokens* to the *Client* after validating the *Client*'s identity and obtaining consent from the *Resource Owner*. +- **Client Application**: The application that requests authorization from the *Authorization Server*. This application must first be registered with the *Authorization Server* to obtain credentials (*Client Id* and *Client Secret*). +- **Resource Server**: The server that hosts the *Protected Resource*. It receives access requests from *Clients*, which must include a valid *Access Token*. +- **Identity Server**: A server that authenticates the *Resource Owner*, offering the facility to log in and authorize *Client* access to *Protected Resources*. +- **Scope**: Defines the specific permissions or access levels being requested by the *Client* (e.g. read, write, delete). +- **Grant Type**: The method by which the *Client* obtains an *Access Token*, based on the OAuth flow being used (e.g. Authorization Code, Client Credentials, Resource Owner Password Credentials). 
+ +### Access Tokens + +In OAuth 2.0, **access tokens** are used to represent the authorization granted to the *client* by the *resource owner*. These tokens are typically small, opaque data objects that are passed along with each API request to authenticate the *client*. While the OAuth 2.0 specification does not mandate a specific format, **JSON Web Tokens (JWTs)** are commonly used as they can encode metadata, such as the *user*'s identity, permissions, and token expiry time. + +Tokens usually come with an expiration date to limit the time they are valid and minimize the risk of abuse. *Access tokens* can often be refreshed via a **refresh token** if they expire, allowing for long-lived access without requiring the *user* (*resource owner*) to reauthorize the *application* (*client*). + +### Client Application + +For a *client* to request an *Access Token* from the *Authorization Server*, it must first authenticate itself. This ensures that the *Resource Owner* can confidently delegate access to the requested resources. + +To do this, the *client* is registered with the *Authorization Server* as a **Client Application**, which requires the following elements: + +- **Client Id**: A unique, public identifier for the *client application* (e.g., a username or application name). +- **Client Secret**: A confidential string (like a password) that is shared between the *client* and the *Authorization Server*. The *client secret* is never exposed to the *Resource Owner*. +- **Redirect URI**: The URL to which the *client* will be redirected after the authorization process is complete (either granted or denied). + +The *client* sends the *client Id* and *client secret* during the authorization request to prove its identity and authenticate its request for an *access token*. Depending on the OAuth *grant type* being used (e.g. 
Authorization Code Flow, Client Credentials Flow), the *Authorization Server* will authenticate the *client* and, if successful, issue an *Access Token*. + + +## Manage Client Access Policies + +The *access tokens* issued to clients by *Tyk Authorization Server* are the same as other [session objects](/api-management/policies#what-is-a-session-object) and can be associated with [access security policies](/api-management/policies#what-is-a-security-policy) at the point of creation. These allow the application of quotas, rate limits and access rights in the normal manner. + +Security policies can be assigned to *client apps* and will be applied to all access tokens issued for that *client app*. + + +## Client App Registration + +For all grant types, the first common step is the registration of the *client* with Tyk Dashboard by creation of a *Client App*. This will allocate a *client Id* and *client secret* that must be provided in future authentication requests by the *client*. + +### Using the Tyk Dashboard UI + +1. *Client apps* are registered per-API, so the first step is to [configure Tyk OAuth 2.0](#configuring-your-api-proxy) as the security method to be used for the API. With this done, you can navigate to the OAuth Client management screen for the API from the **Actions** menu on the **Created APIs** screen: + +Accessing the list of OAuth Clients for an API + +2. You will now be prompted to register a *client app* that will be granted access to the API configuring: + +- redirect URI +- [optional] [security policies](#manage-client-access-policies) to be applied to access tokens generated for the client +- [optional] [metadata](/api-management/policies#what-is-a-session-metadata) to be added to the access tokens + +Add New OAuth Client + +**Note**: when using *Authorization Code grant* the *redirect uri* configured for the *client app* must be the same as that configured in the API definition. + +Select the **Create** button to register the *client app*. 
+ +3. In the OAuth Client management screen, you will see a list of *client apps* registered with the API (as identified by their *client Id*). By clicking on the list item, or from the **Actions** menu's **Edit** option you will be taken to the *Edit Client app* screen, where you can see the *client secret* and make any modifications you need. There is also the option to [revoke tokens](#revoking-access-tokens) that have been issued for this *client app*. + +View client Id and client secret + +### Using the Tyk Dashboard API + +The Tyk Dashboard API contains several endpoints that are provided to manage *client apps*. *Client apps* are registered per-API, so each takes as an input the *API Id* for the API: + +| Action | Endpoint | Reference | +| :--- | :--- | :--- | +| Register a new client app | `POST /api/apis/oauth/{{api-id}}` | [link](/api-management/dashboard-configuration#create-a-new-oauth20-client) | +| Get a list of registered client apps | `GET /api/apis/oauth/{{api-id}}` | [link](/api-management/dashboard-configuration#list-oauth-clients) | +| Get the details of a client app | `GET /api/apis/oauth/{{api-id}}/{{client_id}}` | [link](/api-management/dashboard-configuration#get-an-oauth20-client) | +| Delete a client app | `DELETE /api/apis/oauth/{{api-id}}/{{client_id}}` | [link](/api-management/dashboard-configuration#delete-oauth-client) | + + +## Using the Authorization Code Grant + +When using Tyk as the Authorization Server with the Authorization Code grant, the following steps are followed after [registering the Client App](#client-app-registration): + +Authorization grant type flow + +**Explanatory notes:** + +(1) *client* makes a request to the [authorization endpoint](#authorization-request) on the *Auth Server* + +(2) The *Auth Server* notes the request parameters and returns `HTTP 307 Temporary Redirect`, redirecting the user to an *Identity Server* + +(5) the *user* must log in on the *Identity Server* and authorize the *client* + +(6) when 
the *user* successfully authenticates and authorizes the request, the *Identity Server* must request an [Authorization Code](#authorization-code-request) from the *Auth Server* + +(8) The *Identity Server* provides the *Authorization Code* to the *client* + +(9) The *client* exchanges the *Authorization Code* for an [Access Token](#exchange-the-authorization-code-for-an-access-token) from the *Auth Server* + +(10) The *client* uses the *Access Token* to authenticate with the protected API using the [Auth Token](/api-management/authentication/bearer-token) method + +### Integration with Identity Server + +Whilst Tyk can provide the *authorization server* functionality, issuing and managing access and authorization tokens, the *identity server* functions (authenticating users (resource owners) and allowing them to authorize client access) must be performed by a separate Identity Provider (IdP). + +The identity server will need access to the Tyk Dashboard API to [obtain an Authorization Code](/api-management/dashboard-configuration#oauth20-authorization-code). + +### Authorization Request + +The authorization endpoint for an API proxy on Tyk is a special endpoint automatically added to the proxy definition, accessible from `POST //oauth/authorize` + +The following parameters are required in a request to this endpoint: + +| Parameter | Value | +| :--------------- | :-------------------------- | +| `response_type` | `code` | +| `client_id` | client Id | +| `redirect_uri` | Redirect URI (URL encoded) | + +For example: + +```bash +curl -X POST https://tyk.cloud.tyk.io/my-api/oauth/authorize/ \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "response_type=code&client_id=my-client-id&redirect_uri=http%3A%2F%2Fidentityserver.com%2Fclient-redirect-uri" +``` + +This command, issued by the *client* is the first step of requesting access to the `/my-api` proxy deployed on a Tyk Gateway at `https://tyk.cloud.tyk.io`. 
+ +If the *client Id* (`my-client-id`) is valid, the response will be `HTTP 307 Temporary Redirect` with the redirect URI (`http://identityserver.com/client-redirect-uri`) in the `location` header. + +### Authorization Code Request + +The *Identity Server* requests an *Authorization Code* from the *Authentication Server*. Tyk's *authorization code* endpoint is hosted in the [Tyk Dashboard API](/api-management/dashboard-configuration#oauth20-authorization-code), accessible from `POST /api/apis/{api_id}/authorize-client`. The same `redirect_uri` as provided in the original request must be provided alongside the `client_id` as a security feature to verify the client identity. + +This endpoint is protected using the Dashboard API secret assigned to the *Identity Server*, which must be provided in the `Authorization` header. + +The following parameters are required in a `POST` request to this endpoint: + +| Parameter | Value | +| :--------------- | :-------------------------- | +| `response_type` | `code` | +| `client_id` | client Id | +| `redirect_uri` | Redirect URI (URL encoded) | + +For example: + +```bash +curl -X POST \ + https://admin.cloud.tyk.io/api/apis/oauth/{my-api-id}/authorize-client/ \ + -H "Authorization: " \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "response_type=code&client_id=my-client-id&redirect_uri=http%3A%2F%2Fidentityserver.com%2Fclient-redirect-uri" +``` + +This command, issued by the *identity server* requests an *authorization code* from the Tyk Dashboard at `https://admin.cloud.tyk.io` to access the proxy with API Id `my-api-id`. 
+ +If the *client Id* (`my-client-id`) is valid and `redirect_uri` matches the one provided in the initial request, an *authorization code* will be provided in the response payload, for example: + +```json +{ + "code": "EaG1MK7LS8GbbwCAUwDo6Q", + "redirect_to": "http://example.com/client-redirect-uri?code=EaG1MK7LS8GbbwCAUwDo6Q" +} +``` + +### Exchange the Authorization Code for an Access Token + +Once the *client* has the *authorization code*, it can exchange this for an *access token*, which is used to access the protected API. The token exchange endpoint for an API proxy on Tyk is a special endpoint automatically added to the proxy definition, accessible from `POST //oauth/token`. + +This endpoint is protected using [Basic Authentication](/api-management/authentication/basic-authentication) where the username is the *client Id* and the password is the *client secret*. + +The following parameters are required in the request: + +| Parameter | Value | +| :--------------- | :-------------------------- | +| `grant_type` | `authorization_code` | +| `client_id` | client Id | +| `code` | Authorization Code | +| `redirect_uri` | Redirect URI (URL encoded) | + +For example: + +```bash +curl -X POST \ + https://tyk.cloud.tyk.io/my-api/oauth/token/ \ + -H "Authorization: Basic bXktY2xpZW50LWlkOm15LWNsaWVudC1zZWNyZXQ=" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "grant_type=authorization_code&client_id=my-client-id&code=EaG1MK7LS8GbbwCAUwDo6Q&redirect_uri=http%3A%2F%2Fidentityserver.com%2Fclient-redirect-uri" +``` + +This command, issued by the *client* is the final step to obtain an access token for the `/my-api` proxy deployed on a Tyk Gateway at `https://tyk.cloud.tyk.io`. The basic auth key is the base64 encoded representation of `my-client-id:my-client-secret` The `client_id` and `redirect_uri` match those provided in the initial [authorization request](#authorization-request). 
The `code` is the *authorization code* provided to the *identity server* in the [authorization code request](#authorization-code-request). + +The response payload contains: +- `access_token`: the token which can be used by the *client* to access the protected API +- `expires_in`: the expiration date/time of the access token +- `token_type`: set to `bearer` indicating that the access token should be provided in an [Auth Token](/api-management/authentication/bearer-token) request to the protected API +- `refresh_token`: [optional] a special token that can be used in the [Refresh Token](#using-refresh-tokens) flow + +For example: + +```json +{ + "access_token": "580defdbe1d21e0001c67e5c2a0a6c98ba8b4a059dc5825388501573", + "expires_in": 3600, + "refresh_token": "NWQzNGVhMTItMDE4Ny00MDFkLTljOWItNGE4NzI1ZGI1NGU2", + "token_type": "bearer" +} +``` + + + +## Using the Client Credentials Grant +When using Tyk as the *authorization server* with the Client Credentials grant, the *client* accesses resources on behalf of itself rather than on behalf of a *user*, so there is no user login/authorization step (as seen with [Authorization Code grant](#using-the-authorization-code-grant)). This flow is ideal for server-to-server interactions. + +After [registering the Client App](#client-app-registration), the *client* simply requests an access token directly from the authorization server: + +Client Credentials grant type flow + +### Access Token Request + +The *client* obtains an access token for an API proxy on Tyk from a special endpoint automatically added to the proxy definition, accessible from `POST //oauth/token`. + +This endpoint is protected using Basic Authentication where the username is the client Id and the password is the client secret. 
+
+The following parameters are required in the request:
+
+| Parameter        | Value                       |
+| :--------------- | :-------------------------- |
+| `grant_type`     | `client_credentials`        |
+| `client_id`      | client Id                   |
+| `client_secret`  | client secret               |
+
+For example:
+
+```bash
+curl -X POST \
+  https://tyk.cloud.tyk.io/my-api/oauth/token/ \
+  -H "Authorization: Basic bXktY2xpZW50LWlkOm15LWNsaWVudC1zZWNyZXQ=" \
+  -H "Content-Type: application/x-www-form-urlencoded" \
+  -d "grant_type=client_credentials&client_id=my-client-id&client_secret=my-client-secret"
+```
+
+This command, issued by the *client* will obtain an access token for the `/my-api` proxy deployed on a Tyk Gateway at `https://tyk.cloud.tyk.io`. The basic auth key is the base64 encoded representation of `my-client-id:my-client-secret`. The `client_id` and `client_secret` match those allocated by Tyk (the auth server) for the *client app*.
+
+The response payload contains:
+- `access_token`: the token which can be used by the *client* to access the protected API
+- `expires_in`: the expiration date/time of the access token
+- `token_type`: set to `bearer` indicating that the access token should be provided in an [Auth Token](/api-management/authentication/bearer-token) request to the protected API
+
+For example:
+
+```json
+{
+  "access_token": "580defdbe1d21e0001c67e5c2a0a6c98ba8b4a059dc5825388501573",
+  "expires_in": 3600,
+  "token_type": "bearer"
+}
+```
+
+
+
+Note that Client Credentials grant does not produce a *refresh token*.
+
+
+
+
+
+## Using the Resource Owner Password Grant
+When using Tyk as the *authorization server* with the Resource Owner Password grant, the *client* provides the *user's* credentials when requesting an access token. There is no user login/authorization step (as seen with [Authorization Code grant](#using-the-authorization-code-grant)). 
**This flow is not recommended and is provided only for integration with legacy clients.** + +After [registering the Client App](#client-app-registration), the *client* simply requests an access token directly from the authorization server: + +Username and password grant sequence + +### Access Token Request + +The *client* obtains an access token for an API proxy on Tyk from a special endpoint automatically added to the proxy definition, accessible from `POST //oauth/token`. + +This endpoint is protected using [Basic Authentication](/api-management/authentication/basic-authentication) where the username is the client Id and the password is the client secret. + +The following parameters are required in the request: + +| Parameter | Value | +| :--------------- | :------------------------------------------------------ | +| `grant_type` | `password` | +| `client_id` | client Id | +| `username` | resource owner's username (`resource-owner-username`) | +| `password` | resource owner's password (`resource-owner-password`) | + +For example: + +```bash +curl -X POST \ + https://tyk.cloud.tyk.io/my-api/oauth/token/ \ + -H "Authorization: Basic bXktY2xpZW50LWlkOm15LWNsaWVudC1zZWNyZXQ=" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "grant_type=password&client_id=my-client-id&username=resource-owner-username&password=resource-owner-password" +``` + +This command, issued by the *client* will obtain an access token for the `/my-api` proxy deployed on a Tyk Gateway at `https://tyk.cloud.tyk.io`. The basic auth key is the base64 encoded representation of `my-client-id:my-client-secret` The `client_id` and `client_secret` match those allocated by Tyk (the auth server) for the *client app*. 
+ +The response payload contains: +- `access_token`: the token which can be used by the *client* to access the protected API +- `expires_in`: the expiration date/time of the access token +- `token_type`: set to `bearer` indicating that the access token should be provided in an [Auth Token](/api-management/authentication/bearer-token) request to the protected API +- `refresh_token`: [optional] a special token that can be used in the [Refresh Token](#using-refresh-tokens) flow + +For example: + +```json +{ + "access_token": "580defdbe1d21e0001c67e5c2a0a6c98ba8b4a059dc5825388501573", + "expires_in": 3600, + "refresh_token": "YjdhOWFmZTAtNmExZi00ZTVlLWIwZTUtOGFhNmIwMWI3MzJj", + "token_type": "bearer" +} +``` + + +## Configuring your API Proxy + +As explained [previously](/api-management/client-authentication#how-does-tyk-implement-authentication-and-authorization), the AuthN/Z methods to be used to secure an API proxy are configured in the API definition. This permits granular application of the most appropriate method to each API deployed on Tyk Gateway. + +When using Tyk as the Authorization Server, the API configuration can be applied using the Tyk Dashboard's API Designer UI, or by direct modification of the API definition. We will provide examples here when using Tyk OAS APIs. If you are using Tyk Classic APIs, the process is very similar, though there are differences in the location and specific labelling of options. + +### Using the Tyk API Designer + +1. Client Authentication is configured on the **Settings** screen within the API Designer, within the **Server** section. Ensure that you are in **Edit** mode, click on the button to **Enable** *Authentication* and then select **Tyk OAuth 2.0** from the drop down options: + +Set Authentication Mode + +2. Select the OAuth Grant Type that you wish to use for the API, if appropriate you can also select the *Refresh Token* grant so that the Auth Server (Tyk) will generate both access and refresh tokens. + +3. 
Provide the requested configuration options depending on the selected Grant Type. Note that for *Authorization Code Grant*, **Redirect URL** should be the login page for your Identity Server and must be matched by the `redirect_uri` provided in the *client app* (and in the client's authentication request). The [Notifications](#oauth-token-notifications) configuration can be provided for *Authorization Code* and *Password* grants.
+
+4. Select **Save API** to apply the new settings.
+
+### Using the API Definition
+
+The OpenAPI Specification indicates the use of [OAuth 2.0 authentication](https://swagger.io/docs/specification/v3_0/authentication/oauth2/) in the `components.securitySchemes` object using the `type: oauth2`. Tyk supports the [authorizationCode](/api-management/authentication/oauth-2#using-the-authorization-code-grant), [clientCredentials](#using-the-client-credentials-grant) and [password](#using-the-resource-owner-password-grant) flows and implements Relative Endpoint URLs for the `authorizationUrl`, `tokenUrl` and `refreshUrl`.
+
+```yaml
+components:
+  securitySchemes:
+    myAuthScheme:
+      type: oauth2
+      flows:
+        authorizationCode:
+          authorizationUrl: ...
+          tokenUrl: ...
+          scopes: ...
+
+security:
+  - myAuthScheme: []
+```
+
+With this configuration provided by the OpenAPI description, in the Tyk Vendor Extension we need to enable authentication, to select this security scheme and to indicate where Tyk should look for the OAuth token. Usually the token will be provided in the `Authorization` header, but Tyk is configurable, via the Tyk Vendor Extension, to support custom header keys and credential passing via query parameter or cookie.
+
+```yaml
+x-tyk-api-gateway:
+  server:
+    authentication:
+      enabled: true
+      securitySchemes:
+        myAuthScheme:
+          enabled: true
+          header:
+            enabled: true
+            name: Authorization
+```
+
+Note that URL query parameter keys and cookie names are case sensitive, whereas header names are case insensitive. 
+ +You can optionally [strip the user credentials](/api-management/client-authentication#managing-authorization-data) from the request prior to proxying to the upstream using the `authentication.stripAuthorizationData` field (Tyk Classic: `strip_auth_data`). + +With the OAuth method selected, you'll need to configure Tyk to handle the specific configuration of OAuth grants that you will support. All of the OAuth specific configuration is performed within the [authentication.securitySchemes.oauth](/api-management/gateway-config-tyk-oas#oauth) object in the Tyk Vendor Extension. + +For example: + +```json {hl_lines=["7-11", "14-24", "35-55"],linenos=true, linenostart=1} +{ + "info": { + "title": "My OAuth API", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "security": [ + { + "oauth": [] + } + ], + "paths": {}, + "components": { + "securitySchemes": { + "oauth": { + "type": "oauth2", + "flows": { + "authorizationCode": { + "authorizationUrl": "/oauth/authorize", + "scopes": {}, + "tokenUrl": "/oauth/token" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "My OAuth API", + "state": { + "active": true, + } + }, + "server": { + "authentication": { + "enabled": true, + "securitySchemes": { + "oauth": { + "enabled": true, + "allowedAuthorizeTypes": [ + "code" + ], + "authLoginRedirect": "http:///client-redirect-uri", + "header": { + "enabled": true, + "name": "Authorization" + }, + "notifications": { + "onKeyChangeUrl": "http://notifyme.com", + "sharedSecret": "oauth-shared-secret" + }, + "refreshToken": true + } + } + }, + "listenPath": { + "strip": true, + "value": "/my-oauth-api/" + } + }, + "upstream": { + "url": "http://httpbin.org/" + } + } +} +``` + +In this example: + +- Client authentication has been enabled (line 44) +- The OpenAPI description declares the `oauth` security scheme that expects **Authorization Code** flow. 
Note that the `authorization URL` and `token URL` are declared relative to the API proxy listen path +- Authorization requests (made to `POST /my-oauth-api/oauth/authorize`) will be redirected to `http:///client-redirect-uri` where the *Resource Owner* should be prompted to authorize the request +- [Notifications](#oauth-token-notifications) of token issuance will be sent to `http://notifyme.com` with the `X-Tyk-Shared-Secret` header set to `oauth-shared-secret` + +The *auth server* (Tyk) will issue an *access token* and *refresh token* in exchange for a valid *authorization code*. Once the client has a valid access token, it will be expected in the `Authorization` header of the request. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk and, with correctly configured and integrated *identity server* can be used to try out OAuth Client Authentication using Tyk as the Authorization Server. + +### Using Tyk Classic APIs + +As noted in the Tyk Classic API [documentation](/api-management/gateway-config-tyk-classic#configuring-authentication-for-tyk-classic-apis), you can select the Tyk as OAuth Server method using the `use_oauth2` option. + +## Managing OAuth Tokens + +### Using Refresh Tokens + +The Refresh Token flow is used to obtain a new *access token* when the current token has expired or is about to expire. This allows clients to maintain a valid *access token* without requiring the user to go through the authentication and authorization process again. + +*Refresh tokens* are single use and, when used, automatically invalidate the access token with which they were issued. This prevents accidental duplication of access tokens granting authorized access to a resource (API). 
+ +A *refresh token* can be issued by the *auth server* alongside the *access token* at the last stage of the OAuth flow for: +- Authentication Code grant +- Resource Owner Password grant + +You configure whether Tyk should issue a refresh token within the [API proxy definition](#configuring-your-api-proxy). + +#### Refreshing an Access Token + +If you have correctly configured your API, then Tyk will provide a *refresh token* with the *access token*. The *client* can subsequently exchange the *refresh token* for a new *access token* without having to re-authenticate, with another call to the `POST //oauth/token` endpoint as follows: + +Refresh Token flow + +This endpoint is protected using Basic Authentication where the username is the *client Id* and the password is the *client secret*. + +The following data is required in the request payload: + +| Parameter | Value | +| :--------------- | :--------------------------------------------------------- | +| `grant_type` | `refresh_token` | +| `client_id` | client Id | +| `client_secret` | client secret | +| `refresh_token` | The refresh token provided with the original access token | + +For example: + +```bash +curl -X POST \ + https://tyk.cloud.tyk.io/my-api/oauth/token/ \ + -H "Authorization: Basic bXktY2xpZW50LWlkOm15LWNsaWVudC1zZWNyZXQ=" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "grant_type=refresh_token&client_id=my-client-id&client_secret=my-client-secret&refresh_token=YjdhOWFmZTAtNmExZi00ZTVlLWIwZTUtOGFhNmIwMWI3MzJj" +``` + +This command, issued by the *client* will obtain a new access token for the `/my-api` proxy deployed on a Tyk Gateway at `https://tyk.cloud.tyk.io`. The basic auth key is the base64 encoded representation of `my-client-id:my-client-secret` The `client_id` and `client_secret` match those allocated by Tyk (the auth server) for the *client app*. The `refresh_token` is a valid *refresh token* previously issued to the *client*. 
+ +The response payload contains: +- `access_token`: a new *access token* which can be used by the *client* to access the protected API +- `expires_in`: the expiration date/time of the access token +- `token_type`: set to `bearer` indicating that the access token should be provided in an [Auth Token](/api-management/authentication/bearer-token) request to the protected API +- `refresh_token`: a new *refresh token* that can be used later to refresh the new *access token* + +For example: + +```json +{ + "access_token": "580defdbe1d21e0001c67e5c2a0a6c98ba8b4a059dc5825388501573", + "expires_in": 3600, + "refresh_token": "NWQzNGVhMTItMDE4Ny00MDFkLTljOWItNGE4NzI1ZGI1NGU2", + "token_type": "bearer" +} +``` + +### Revoking Access Tokens + +OAuth access tokens have built in expiry, but if you need to [revoke](https://tools.ietf.org/html/rfc7009) a client's access to the API before this time, then you can use the option on the [OAuth Client management screen](#using-the-tyk-dashboard-ui) screen in Tyk Dashboard UI or the Tyk Dashboard API to do so. + +Using the **Tyk Dashboard API** you can revoke specific tokens (both access and refresh) or all tokens issued for a specific *client app* as follows: + +- [retrieve a list of all tokens for a client app](/api-management/dashboard-configuration#retrieve-all-current-tokens-for-specified-oauth20-client) +- [revoke a single token](/api-management/dashboard-configuration#revoke-a-single-oauth-client-token) +- [revoke all tokens for a client app](/api-management/dashboard-configuration#revoke-all-oauth-client-tokens) + +These endpoints are protected using the Dashboard API secret assigned to the user managing the tokens, which must be provided in the `Authorization` header. 
+ +In this example, we issue a request to the `/revoke` endpoint of the *auth server* via the Tyk Dashboard API to invalidate a specific *access token*: + +```bash +curl -X POST \ + https://admin.cloud.tyk.io/api/apis/oauth/{CLIENT_ID}/revoke/ \ + -H "Authorization: " \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "token=580defdbe1d21e0001c67e5c2a0a6c98ba8b4a059dc5825388501573&token_type_hint=access_token&client_id=my-client-id&client_secret=my-client-secret" +``` + +Note that the `token_type_hint` must be set to `access_token` or `refresh_token` to match the type of `token` to be revoked. + + +### OAuth Token Notifications + +When operating as an OAuth authorization server, Tyk can generate an event whenever it issues an *access token*. You can configure a dedicated webhook that will be triggered to notify the Resource Owner service of the occurrence of the event. + +OAuth token notifications can only be configured when using **Authorization Code** or **Resource Owner Password Credentials** grants, not when using *Client Credentials* grant because this flow is primarily used for server-to-server communication, where the client acts on its own behalf without user-specific authorization changes. + +You can configure the URL that the webhook will issue a `POST` request and a "shared secret" value that will be provided in a header (`X-Tyk-Shared-Secret`) used to secure the communication to the target application. The OAuth token notification webhook does not support any other authentication method. 
+ +The body of the webhook request will have this content: + +```json +{ + "auth_code": "", + "new_oauth_token": "", + "refresh_token": "", + "old_refresh_token": "", + "notification_type": "" +} +``` + +where +- `auth_code` is the Authorization Code that has been issued +- `new_oauth_token` is the Access Token that has been issued +- `refresh_token` is the Refresh Token that has been issued +- `old_refresh_token` is the Refresh Token that has been consumed when refreshing an access token +- `notification_type` will indicate the cause of the event: + - `new`: a new access token has been issued + - `refresh`: a token has been refreshed and a new refresh token has been issued + +#### Configuring Notifications in the Tyk API Designer + +Client Authentication is configured on the **Settings** screen within the Tyk OAS API Designer, within the **Server** section. Ensuring that you are in **Edit** mode, go to the *Authentication* section where you should have selected **Tyk OAuth 2.0** from the drop down options. + +Here you will see the *Notifications* section where you can configure: + +- Notifications URL +- Notifications Shared Secret + +Remember to select **Save API** to apply these settings to your API. + +#### Configuring Notifications in the Tyk OAS API Definition + +The example given [above](#using-the-api-definition) includes the configuration necessary to issue notifications for token issuance (see lines 48-51 in the example). 
diff --git a/api-management/automations.mdx b/api-management/automations.mdx new file mode 100644 index 000000000..9835504d6 --- /dev/null +++ b/api-management/automations.mdx @@ -0,0 +1,66 @@ +--- +title: "Tyk Automations Tools" +description: "Tyk Tools that help with automating deployment and API Management operations" +keywords: "Tyk API Management, Tyk Sync, Tyk Operator, Github, Kubernetes, Automations" +sidebarTitle: "Overview" +--- + +import { ResponsiveGrid } from '/snippets/ResponsiveGrid.mdx'; + +## Introduction + +Managing APIs across multiple environments can quickly become complex. Updating and overseeing multiple configurations, security policies, and deployments requires a significant amount of effort without the right tools. Tyk’s suite of automation tools simplifies this process by enabling automated control over API management tasks, helping teams ensure reliability, reduce manual errors, and maintain consistency across deployments. + +In this page, we’ll walk through the primary tools for automating API management with Tyk, including: + +* **Tyk Operator for Kubernetes**: Automate API deployments within Kubernetes environments. +* **Tyk Sync**: Sync configurations across environments for consistent API management. + +## Prerequisites + +Before diving into lifecycle automations with Tyk, ensure you have the following: + +- **A Tyk installation** (Self-Managed or Cloud) + - If you don't have Tyk installed, follow our [installation guide](/tyk-self-managed/install) + - For Tyk Cloud, sign up [here](https://tyk.io/sign-up/) + - Tyk Operator license key. Starting from Tyk Operator v1.0, a valid license key is required. 
+ +- **Access to a Kubernetes cluster v1.19+** (for Tyk Operator sections) + - If you're new to Kubernetes, check out the official [Kubernetes documentation](https://kubernetes.io/docs/setup/) + +- **Helm 3+** (for installing Tyk Operator) + - If you don't have Helm installed, follow the [official Helm installation guide](https://helm.sh/docs/intro/install/) + - Verify your installation by running `helm version` in your terminal + +- **Tyk Dashboard v3+ access** (for Tyk Sync setup) + - Learn how to set up the Tyk Dashboard [here](/api-management/dashboard-configuration) + +- **Basic knowledge of Kubernetes, YAML** (important for Tyk Operator and Tyk Sync) + - For Kubernetes, visit the [official tutorials](https://kubernetes.io/docs/tutorials/) + - For YAML, check out this [YAML tutorial](https://yaml.org/spec/1.2/spec.html) + +If you're missing any of these prerequisites, please follow the provided links to set up the necessary components before proceeding with the lifecycle automation steps. + +## Automation Tools + + + + + +**Read time: 10 mins** + +Synchronize Tyk Environment With GitHub using Tyk Sync. + + + +**Read time: 10 mins** + +API Management in Kubernetes using Tyk Operator. + + + + + +## Conclusion + +With Tyk’s automation tools, you now have a set of options for streamlining API management, from handling deployments within Kubernetes to establishing consistency across multiple environments. By integrating these tools, you can simplify complex API workflows, maintain secure configurations, and save time through reduced manual intervention. 
\ No newline at end of file diff --git a/api-management/automations/operator.mdx b/api-management/automations/operator.mdx new file mode 100644 index 000000000..6bfb23029 --- /dev/null +++ b/api-management/automations/operator.mdx @@ -0,0 +1,978 @@ +--- +title: "Tyk Operator - API Management in Kubernetes" +description: "Kubernetes native API management using Tyk Operator" +keywords: "Tyk API Management, Tyk Sync, Tyk Operator, Github, Kubernetes, Automations" +sidebarTitle: "Overview" +--- + +## Introduction + +Using Tyk Operator within Kubernetes allows you to manage API lifecycles declaratively. This section provides instructions for setting up and configuring the Tyk Operator to automate API creation, updates, and security in Kubernetes clusters, ensuring your APIs align with Kubernetes management practices. + + +## What is Tyk Operator? +If you’re using Kubernetes, or if you’re building an API that operates within a Kubernetes environment, the Tyk Operator is a powerful tool for automating the API lifecycle. + +Tyk Operator is a native Kubernetes operator, allowing you to define and manage APIs as code. This means you can deploy, update, and secure APIs using the same declarative configuration approach Kubernetes uses for other application components. + +Tyk Operator + +## Key Concepts + +### GitOps With Tyk +With Tyk Operator, you can configure your APIs using Kubernetes native manifest files. You can use the manifest files in a GitOps workflow as the single source of truth for API deployment. + + + +If you use Tyk Operator to manage your APIs, you should set up RBAC such that human users cannot have the "write" permission on the API definition endpoints using Tyk Dashboard. + + + +#### What is GitOps? +β€œGitOps” refers to the operating model of using Git as the β€œsingle source of truth” to drive continuous delivery for infrastructure and software through automated CI/CD workflow. 
+ +#### Tyk Operator in your GitOps workflow +You can install Argo CD, Flux CD or the GitOps tool of your choice in a cluster, and connect it to the Git repository where you version control your API manifests. The tool can synchronise changes from Git to your cluster. The API manifest updates in cluster would be detected by Tyk Operator, which has a Kubernetes controller to automatically reconcile the API configurations on your Tyk Gateway or Tyk Dashboard. + +**Kubernetes-Native Developer Experience** +API Developers enjoy a smoother Continuous Integration process as they can develop, test, and deploy the microservices. API configurations together use familiar development toolings and pipeline. + +**Reliability** +With declarative API configurations, you have a single source of truth to recover after any system failures, reducing the meantime to recovery from hours to minutes. + +#### Single Source of Truth for API Configurations +Tyk Operator will reconcile any divergence between the Kubernetes desired state and the actual state in [Tyk Gateway](/tyk-oss-gateway) or [Tyk Dashboard](/api-management/dashboard-configuration). Therefore, you should maintain the API definition manifests in Kubernetes as the single source of truth for your system. If you update your API configurations using Tyk Dashboard, those changes would be reverted by Tyk Operator eventually. + +To learn more about Gitops with Tyk, refer the following blog posts: +- [GitOps-enabled API management in Kubernetes](https://tyk.io/blog/gitops-enabled-api-management-in-kubernetes/) +- [A practical guide using Tyk Operator, ArgoCD, and Kustomize](https://tyk.io/blog/a-practical-guide-using-tyk-operator-argocd-and-kustomize/) + +### Custom Resources in Tyk + +In Kubernetes, a [Custom Resource (CR)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) is an extension of the Kubernetes API that allows you to introduce custom objects in your cluster. 
Custom Resources enable you to define and manage custom configurations and settings specific to your applications, making Kubernetes highly extensible. These custom objects are defined using Custom Resource Definitions (CRDs), which specify the schema and structure of the resource. + +Tyk Operator manages multiple custom resources to help users create and maintain their API configurations: + +**TykOasApiDefinition**: Available from Tyk Operator v1.0. It represents a [Tyk OAS API configuration](/api-management/gateway-config-tyk-oas). Tyk OAS API is based on the OpenAPI specification (OAS) and is the recommended format for standard HTTP APIs. + +**ApiDefinition**: Available on all versions of Tyk Operator. It represents a [Tyk Classic API configuration](/api-management/gateway-config-tyk-classic). Tyk Classic API is the traditional format used for defining all APIs in Tyk, and now the recommended format for non-HTTP APIs such as TCP, GraphQL, and Universal Data Graph (UDG). Tyk Operator supports the major features of Tyk Classic API and the feature support details can be tracked [here](/api-management/automations/operator#apidefinition-crd). + +**TykStreamsApiDefinition**: Available from Tyk Operator v1.1. It represents an [Async API configuration](/api-management/event-driven-apis#configuration-options) which is based on [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas). Tyk Operator supports all [Tyk Streams](/api-management/event-driven-apis#) features as they become available on the Gateway. + +**SecurityPolicy**: Available on all versions of Tyk Operator. It represents a [Tyk Security Policy configuration](/tyk-stack/tyk-operator/create-an-api#security-policy-example). Security Policies in Tyk provide a way to define and enforce security controls, including authentication, authorization, and rate limiting for APIs managed in Tyk. 
Tyk Operator supports essential features of Security Policies, allowing users to centrally manage access control and security enforcement for all APIs across clusters. + +These custom resources enable users to leverage Kubernetes' declarative configuration management to define, modify, and version their APIs, seamlessly integrating with other Kubernetes-based workflows and tools. + +#### Custom Resources for API and Policy Configuration + +The following custom resources can be used to configure APIs and policies at [Tyk Gateway](/tyk-oss-gateway) or [Tyk Dashboard](/api-management/dashboard-configuration). + +| Kind | Group | Version | Description | +| :-------------------- | :------------- | :----------- | :--------------------------------------------------------------------------------------------------- | +| TykOasApiDefinition| tyk.tyk.io | v1alpha1 | Defines configuration of [Tyk OAS API Definition object](/api-management/gateway-config-tyk-oas) | +| ApiDefinition | tyk.tyk.io | v1alpha1 | Defines configuration of [Tyk Classic API Definition object](/api-management/gateway-config-tyk-classic) | +| TykStreamsApiDefinition| tyk.tyk.io | v1alpha1 | Defines configuration of [Tyk Streams](/api-management/event-driven-apis#configuration-options) | +| SecurityPolicy | tyk.tyk.io | v1alpha1 | Defines configuration of [security policies](/api-management/policies#what-is-a-security-policy). Operator supports linking ApiDefinition custom resources in SecurityPolicy's access list so that API IDs do not need to be hardcoded in the resource manifest. | +| SubGraph | tyk.tyk.io | v1alpha1 | Defines a [GraphQL federation subgraph](/api-management/graphql#subgraphs-and-supergraphs). | +| SuperGraph | tyk.tyk.io | v1alpha1 | Defines a [GraphQL federation supergraph](/api-management/graphql#subgraphs-and-supergraphs). | +| OperatorContext | tyk.tyk.io | v1alpha1 | Manages the context in which the Tyk Operator operates, affecting its overall behavior and environment. 
See [Operator Context](/api-management/automations/operator#multi-tenancy-in-tyk) for details. | + +#### Tyk Classic Developer Portal + +The following custom resources can be used to configure [Tyk Classic Developer Portal](/tyk-developer-portal/tyk-portal-classic). + +| Kind | Group | Version | Description | +| :-------------------- | :------------- | :----------- | :--------------------------------------------------------------------------------------------------- | +| APIDescription | tyk.tyk.io | v1alpha1 | Configures [Portal Documentation](/tyk-apis/tyk-portal-api/portal-documentation). | +| PortalAPICatalogue | tyk.tyk.io | v1alpha1 | Configures [Portal API Catalogue](/getting-started/key-concepts/api-catalogue). | +| PortalConfig | tyk.tyk.io | v1alpha1 | Configures [Portal Configuration](/tyk-apis/tyk-portal-api/portal-configuration). | + + +### Reconciliation With Tyk Operator +#### Controllers & Operators +In Kubernetes, [controllers](https://kubernetes.io/docs/concepts/architecture/controller/) watch one or more Kubernetes resources, which can be built-in types like *Deployments* or custom resources like *ApiDefinition* - in this case, we refer to Controller as Operator. The purpose of a controller is to match the desired state by using Kubernetes APIs and external APIs. + +> A [Kubernetes operator](https://www.redhat.com/en/topics/containers/what-is-a-kubernetes-operator) is an application-specific controller that extends the functionality of the Kubernetes API to create, configure, and manage instances of complex applications on behalf of a Kubernetes user. + +#### Desired State vs Observed State +Let’s start with the *Desired State*. It is defined through Kubernetes Manifests, most likely YAML or JSON, to describe what you want your system to be in. Controllers will watch the resources and try to match the actual state (the observed state) with the desired state for Kubernetes Objects. 
For example, you may want to create a Deployment that is intended to run three replicas. So, you can define this desired state in the manifests, and Controllers will perform necessary operations to make it happen. + +How about *Observed State*? Although the details of the observed state may change controller by controller, usually controllers update the status field of Kubernetes objects to store the observed state. For example, in Tyk Operator, we update the status to include *api_id*, so that Tyk Operator can understand that the object was successfully created on Tyk. + +#### Reconciliation +Reconciliation is a special design paradigm used in Kubernetes controllers. Tyk Operator also uses the same paradigm, which is responsible for keeping our Kubernetes objects in sync with the underlying external APIs - which is Tyk in our case. + +**When would reconciliation happen?** +
+Before diving into Tyk Operator reconciliation, let's briefly mention some technical details about how and when reconciliation happens. Reconciliation only happens when certain events happen on your cluster or objects. Therefore, Reconciliation will **NOT** be triggered when there is an update or modification on Tyk’s side. It only watches certain Kubernetes events and is triggered based on them. Usually, the reconciliation happens when you modify a Kubernetes object or when the cache used by the controller expires - side note, controllers, in general, use cached objects to reduce the load in the Kube API server. Typically, caches expire in ~10 hours or so but the expiration time might change based on Operator configurations. + +So, in order to trigger Reconciliation, you can either +- modify an object, which will trigger reconciliation over this modified object or, +- restart Tyk Operator pod, which will trigger reconciliation over each of the objects watched by Tyk Operator. + +**What happens during Reconciliation?** +
+Tyk Operator will compare desired state of the Kubernetes object with the observed state in Tyk. If there is a drift, Tyk Operator will update the actual state on Tyk with the desired state. In the reconciliation, Tyk Operator mainly controls three operations; DELETE, CREATE, and UPDATE. + +- **CREATE** - an object is created in Kubernetes but not exists in Tyk +- **UPDATE** - an object is in different in Kubernetes and Tyk (we compare that by hash) +- **DELETE** - an object is deleted in Kubernetes but exists in Tyk + +**Drift Detection** +
+If human operators or any other system delete or modify API Definition from Tyk Gateway or Dashboard, Tyk Operator will restore the desired state back to Tyk during reconciliation. This is called Drift Detection. It can protect your systems from unauthorized or accidental modifications. It is a best practice to limit user access rights on production environment to read-only in order to prevent accidental updates through API Manager directly. + + +### CRD Versioning + +Tyk follows standard practices for naming and versioning custom resources as outlined by the Kubernetes Custom Resource Definition [versioning guidelines](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/). Although we are currently on the `v1alpha1` version, no breaking changes will be introduced to existing Custom Resources without a version bump. This means that any significant changes or updates that could impact existing resources will result in a new version (e.g., `v1beta1` or `v1`) and Operator will continue supporting all CRD versions for a reasonable time before deprecating an older version. This ensures a smooth transition and compatibility, allowing you to upgrade without disrupting your current configurations and workflows. + +For more details on Kubernetes CRD versioning practices, refer to the Kubernetes Custom Resource Definition [Versioning documentation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/). + + +### Operator User +Tyk Operator is a Kubernetes Controller that manages Tyk Custom Resources (CRs) such as API Definitions and Security Policies. Developers define these resources as [Custom Resource (CRs)](#custom-resources-in-tyk), and Tyk Operator ensures that the desired state is reconciled with the Tyk Gateway or Dashboard. This involves creating, updating, or deleting API configurations until the target state matches the desired state. 
+ +For the Tyk Dashboard, Tyk Operator functions as a system user, bound by Organization and RBAC rules. + +During start up, Tyk Operator looks for these keys from `tyk-operator-conf` secret or from the environment variables (listed in the table below). + +| Key or Environment Variable | Description | +|:-----|:-------------| +| `TYK_MODE` | "ce" for OSS or "pro" for licensed users | +| `TYK_URL` | URL of Tyk Gateway or Dashboard API | +| `TYK_ORG` | Organization ID of Operator user | +| `TYK_AUTH` | API key of Operator user | + +These would be the default credentials Tyk Operator uses to connect to Tyk. + + +### Multi-tenancy in Tyk + +Tyk Dashboard is multi-tenant capable, which means you can use a single Tyk Dashboard instance to host separate [organizations](/api-management/dashboard-configuration#organizations) for each team or department. Each organization is a completely isolated unit with its own: + +- API Definitions +- API Keys +- Users +- Developers +- Domain +- Tyk Classic Portal + +This structure is ideal for businesses with a complex hierarchy, where distinct departments operate independently but within the same overall infrastructure. + +Multi-tenancy in Tyk Dashboard + +#### Define OperatorContext for Multi-Tenant API Management + +The `OperatorContext` in Tyk Operator allows you to create isolated management environments by defining specific access parameters for different teams or departments within a shared Tyk Operator instance. It helps you specify: + +- The Tyk Dashboard with which the Operator interacts +- The organization under which API management occurs +- The user identity utilized for requests +- The environment in which the Operator operates + +By setting different `OperatorContext` configurations, you can define unique access and management contexts for different teams. 
These contexts can then be referenced directly in your `ApiDefinition`, `TykOasApiDefinition`, `TykStreamsApiDefinition` or `SecurityPolicy` custom resource definitions (CRDs) using the `contextRef` field, enabling precise control over API configurations. + +#### Example Scenarios Using OperatorContext + +1. **No OperatorContext Defined** + - If no `OperatorContext` is defined, Tyk Operator defaults to using credentials from the `tyk-operator-conf` secret or from environment variables. This means all API management actions are performed under the system’s default user credentials, with no specific contextual isolation. + +2. **OperatorContext Defined but Not Referenced** + - When an `OperatorContext` is defined but not referenced in an API configuration, Tyk Operator continues to use the default credentials from `tyk-operator-conf`. The specified `OperatorContext` is ignored, resulting in API operations being managed under default credentials. + +3. **OperatorContext Defined and Referenced** + - If a specific `OperatorContext` is both defined and referenced in an API or policy, Tyk Operator utilizes the credentials and parameters from the referenced `OperatorContext` to perform API operations. This allows each API or policy to be managed with isolated configurations, enabling team-based or department-specific API management within the same Kubernetes cluster. + +Using `OperatorContext` offers flexibility for multi-tenancy, helping organizations manage and isolate API configurations based on their specific team or departmental needs. + +Multi-tenancy in Kubernetes Tyk Operator + +### TLS Certificates + +Tyk Operator is designed to offer a seamless Kubernetes-native experience by managing TLS certificates stored within Kubernetes for your API needs. 
Traditionally, to use a certificate (e.g., as a client certificate, domain certificate, or certificate for accessing an upstream service), you would need to manually upload the certificate to Tyk and then reference it using a 'Certificate ID' in your API definitions. This process can become cumbersome, especially in a Kubernetes environment where certificates are often managed as secrets and may rotate frequently. + +To address this challenge, Tyk Operator allows you to directly reference certificates stored as Kubernetes secrets within your custom resource definitions (CRDs). This reduces operational overhead, minimizes the risk of API downtime due to certificate mismatches, and provides a more intuitive experience for API developers. + +#### Benefits of Managing Certificates with Tyk Operator +- **Reduced operational overhead**: Automates the process of updating certificates when they rotate. +- **Minimized risk of API downtime**: Ensures that APIs continue to function smoothly, even when certificates are updated. +- **Improved developer experience**: Removes the need for API developers to manage certificate IDs manually. + +#### Examples + +| Certificate Type | Supported in ApiDefinition | Supported in TykOasApiDefinition | Supported in TykStreamsApiDefinition | +| :------------------ | :------------- | :--------- | :--------- | +| Client certifates | βœ… [Client mTLS](/basic-config-and-security/security/mutual-tls/client-mtls#setup-static-mtls-in-tyk-operator-using-the-tyk-classic-api-definition) | βœ… [Client mTLS](/basic-config-and-security/security/mutual-tls/client-mtls#setup-static-mtls-in-tyk-operator-using-tyk-oas-api-definition) | Certificate ID can be set in the API Definition but configuring certificates from Secrets in CRD is not supported. 
| +| Custom domain certificates | βœ… [TLS and SSL](/api-management/certificates#dynamically-setting-ssl-certificates-for-custom-domains) | βœ… [TLS and SSL](/api-management/certificates#dynamically-setting-ssl-certificates-for-custom-domains) | Certificate ID can be set in the API Definition but configuring certificates from Secrets in CRD is not supported. | +| Public keys pinning | βœ… [Certificate pinning](/api-management/upstream-authentication/mtls#using-tyk-operator-to-configure-mtls-for-tyk-classic-apis) | βœ… [Certificate pinning](/api-management/upstream-authentication/mtls#certificate-pinning) | Certificate ID can be set in the API Definition but configuring certificates from Secrets in CRD is not supported. | +| Upstream mTLS | βœ… [Upstream mTLS via Operator](/api-management/upstream-authentication/mtls#using-tyk-operator-to-configure-mtls-for-tyk-classic-apis) | βœ… [Upstream mTLS via Operator](/api-management/upstream-authentication/mtls#using-tyk-operator-to-configure-mtls) | Certificate ID can be set in the API Definition but configuring certificates from Secrets in CRD is not supported. | + +## What Features Are Supported By Tyk Operator? + +### APIDefinition CRD +Tyk stores API configurations as JSON objects called API Definitions. If you are using Tyk Dashboard to manage Tyk, then these are stored in either Postgres or MongoDB, as specified in the database settings. On the other hand, if you are using Tyk OSS, these configurations are stored as files in the /apps directory of the Gateway which is located at the default path /opt/tyk-gateway. + +An API definition includes various settings and middleware that control how incoming requests are processed. + +#### API Types +Tyk supports various API types, including HTTP, HTTPS, TCP, TLS, and GraphQL. It also includes Universal Data Graph versions for unified data access and federation, allowing seamless querying across multiple services. 
+ +| Type | Support | Supported From | Comments | +| :-------------------------------- | :--------- | :---------------- | :------------------------------ | +| HTTP | βœ… | v0.1 | Standard HTTP proxy for API requests. | +| HTTPS | βœ… | v0.4 | Secure HTTP proxy using SSL/TLS encryption. | +| TCP | βœ… | v0.1 | Handles raw TCP traffic, useful for non-HTTP APIs. | +| TLS | βœ… | v0.1 | Handles encrypted TLS traffic for secure communication. | +| GraphQL - Proxy | βœ… | v0.1 | Proxy for GraphQL APIs, routing queries to the appropriate service. | +| Universal Data Graph v1 | βœ… | v0.1 | Supports Universal Data Graph v1 for unified data access. | +| Universal Data Graph v2 | βœ… | v0.12 | Supports the newer Universal Data Graph v2 for more advanced data handling. | +| GraphQL - Federation | βœ… | v0.12 | Supports GraphQL Federation for querying multiple services as one API. | + +#### Management of APIs +Tyk offers flexible API management features such as setting active/inactive status, categorizing and naming APIs, versioning, and defining ownership within teams or organizations for streamlined administration. + +| Type | Support | Supported From | Comments | +| :-------------------------------- | :--------- | :---------------- | :------------------------------ | +| API Name | βœ… | v0.1 | Assign and manage names for your APIs. | +| API Status (inactive/active) | βœ… | v0.2 | Toggle API status between active and inactive. | +| API Categories | βœ… | v0.1 | Categorize APIs for easier management. | +| API ID | βœ… | v0.1 | Assign unique IDs to APIs for tracking and management. | +| API Ownership | βœ… | v0.12 | Define ownership of APIs within teams or organizations. | +| API Versioning | βœ… | v0.1 | Enable version control for APIs. | + +#### Traffic Routing +Tyk enables traffic routing through path-based or host-based proxies and allows redirection to specific target URLs, providing control over how requests are directed to backend services. 
+ +| Type | Supported | Supported From | Comments | +| :--------------------------- | :--------- | :-------------- | :---------------------------- | +| Path-Based Proxy | βœ… | v0.1 | Route traffic based on URL path. | +| Host-Based Proxy | βœ… | v0.1 | Route traffic based on the request host. | +| Target URL | βœ… | v0.1 | Redirect traffic to a specific target URL. | + +#### Client to Gateway Authentication and Authorization +Tyk provides multiple authentication options for client-to-gateway interactions, including keyless access, JWT, client mTLS, IP allow/block lists, and custom authentication plugins for enhanced security. + +| Type | Supported | Supported From | Comments | +| :----------------------------- | :--------- | :-------------- | :----------------------------------------------- | +| Keyless | βœ… | v0.1 | No authentication required, open access. | +| Auth Token | βœ… | v0.1 | Requires an authentication token (Bearer token).| +| JWT | βœ…οΈ | v0.5 | Uses JSON Web Tokens for secure authentication. | +| OpenID Connect | ❌ | - | Recommended to use JWT for OIDC authentication. | +| OAuth2 | ❌ | - | OAuth2 not supported, JWT is recommended. | +| Client mTLS | βœ… | v0.11 | Supports static client mutual TLS authentication. | +| HMAC | ❌ | - | HMAC authentication is not implemented. | +| Basic Authentication | βœ… | v0.12 | Only supports enabling with default metadata. | +| Custom Authentication Plugin (Go) | βœ… | v0.11 | Custom authentication plugin written in Go. | +| Custom Authentication Plugin (gRPC) | βœ… | v0.1 | Custom authentication plugin using gRPC. | +| Multiple Authentication | βœ… | v0.14 | Chain multiple authentication methods. | +| IP Allowlist | βœ… | v0.5 | Allows access only from specific IP addresses. | +| IP Blocklist | βœ… | v0.5 | Blocks access from specific IP addresses. 
|
+
+#### Gateway to Upstream Authentication
+Tyk supports secure upstream connections through mutual TLS, certificate pinning, and public key verification to ensure data integrity between the gateway and backend services. For full details, please see the [Upstream Authentication](/api-management/upstream-authentication) section.
+
+| Type | Supported | Supported From | Comments |
+| :------------------------------------------------- | :----------- | :---------------- | :---------------- |
+| Mutual TLS for upstream connections | βœ… | v0.9 | Mutual TLS authentication for upstream connections. |
+| Public Key Certificate Pinning | βœ… | v0.9 | Ensures that the upstream certificate matches a known key. |
+| Upstream Request Signing using HMAC | βœ… | v1.2.0 | Attach an encrypted signature to requests to verify the gateway as the sender. |
+
+#### API-level (Global) Features
+Tyk offers global features for APIs, such as detailed traffic logging, CORS management, rate limiting, header transformations, and analytics plugins, with support for tagging, load balancing, and dynamic variables.
+
+| Feature | Supported | Supported From | Comments |
+| :-------------------------------------- | :----------- | :---------------- | :------------------------------------------------------------------------ |
+| Detailed recording (in Log Browser) | βœ… | v0.4.0 | Records detailed API traffic logs for analysis. |
+| Config Data | βœ… | v0.8.2 | Stores additional configuration data for APIs. |
+| Context Variables | βœ… | v0.1 | Enables dynamic context-based variables in APIs. |
+| Cross Origin Resource Sharing (CORS) | βœ… | v0.2 | Manages CORS settings for cross-domain requests. |
+| Service Discovery | ⚠️ | - | Service discovery is untested in this version. |
+| Segment Tags | βœ… | v0.1 | Tags APIs for segmentation across environments. |
+| Internal API (not exposed by Gateway) | βœ… | v0.6.0 | Internal APIs are not exposed via the Gateway. 
| +| Global (API-level) Header Transform | βœ… | v0.1.0 | Transforms request and response headers at the API level. | +| Global (API-level) Rate Limit | βœ… | v0.10 | Sets rate limits globally for APIs. | +| Custom Plugins | βœ… | v0.1 | Supports the use of custom plugins for API processing. | +| Analytics Plugin | βœ… | v0.16.0 | Integrates analytics plugins for API monitoring. | +| Batch Requests | ❌ | - | Batch requests are not supported. | +| Custom Analytics Tags (Tag Headers) | βœ… | v0.10.0 | Custom tags for API analytics data. | +| Expire Analytics After | ❌ | - | Not supported in this version. | +| Do not track Analytics (per API) | βœ… | v0.1.0 | Disable analytics tracking on specific APIs. | +| Webhooks | ❌ | - | Webhook support is not available. | +| Looping | βœ… | v0.6 | Enables internal looping of API requests. | +| Round Robin Load Balancing | βœ… | - | Supports round-robin load balancing across upstream servers. | + +#### Endpoint-level Features +For specific API endpoints, Tyk includes features like caching, circuit breaking, request validation, URL rewriting, and response transformations, allowing for precise control over request processing and response handling at an endpoint level. + +| Endpoint Middleware | Supported | Supported From | Comments | +| :----------------------------------- | :----------- | :---------------- | :------------------------------------------------ | +| Allow list | βœ…οΈ | v0.8.2 | Allows requests only from approved sources. | +| Block list | βœ…οΈ | v0.8.2 | Blocks requests from disapproved sources. | +| Cache | βœ… | v0.1 | Caches responses to reduce latency. | +| Advance Cache | βœ… | v0.1 | Provides advanced caching capabilities. | +| Circuit Breaker | βœ… | v0.5 | Prevents service overload by breaking circuits. | +| Track Endpoint | βœ… | v0.1 | Tracks API endpoint usage for analysis. | +| Do Not Track Endpoint | βœ… | v0.1 | Disables tracking for specific endpoints. 
| +| Enforced Timeouts | βœ… | v0.1 | Ensures timeouts for long-running requests. | +| Ignore Authentication | βœ… | v0.8.2 | Bypasses authentication for selected endpoints.| +| Internal Endpoint | βœ… | v0.1 | Restricts access to internal services. | +| URL Rewrite | βœ…οΈ | v0.1 | Modifies request URLs before processing. | +| Validate Request | βœ… | v0.8.2 | Validates incoming requests before forwarding. | +| Rate Limit | ❌ | - | Rate limiting is not supported per endpoint. | +| Request Size Limit | βœ…οΈ | v0.1 | Limits the size of requests to prevent overload.| +| Request Method Transform | βœ… | v0.5 | Modifies HTTP methods for incoming requests. | +| Request Header Transform | βœ… | v0.1 | Transforms request headers. | +| Request Body Transform | βœ… | v0.1 | Transforms request bodies for processing. | +| Request Body JQ Transform | ⚠️ | v0.1 | Requires JQ support on the Gateway Docker image.| +| Response Header Transform | βœ… | v0.1 | Transforms response headers. | +| Response Body Transform | βœ… | v0.1 | Transforms response bodies. | +| Response Body JQ Transform | ⚠️ | v0.1 | Requires JQ support on the Gateway Docker image.| +| Mock Response | βœ… | v0.1 | Simulates API responses for testing. | +| Virtual Endpoint | βœ… | v0.1 | Allows creation of dynamic virtual endpoints. | +| Per-Endpoint Plugin | ❌ | - | Plugin support per endpoint is not available. | +| Persist Graphql | ❌ | - | Not supported in this version. | + + +### TykOasAPIDefinition CRD +The TykOasApiDefinition Custom Resource Definition (CRD) manages [Tyk OAS API Definition objects](/api-management/gateway-config-tyk-oas) within a Kubernetes environment. This CRD enables the integration and management of Tyk API definitions using Kubernetes-native tools, simplifying the process of deploying and managing OAS APIs on the Tyk Dashboard. + +#### TykOasApiDefinition Features + +`TykOasApiDefinition` can support all features of the Tyk OAS API definition. 
You just need to provide the Tyk OAS API definition via a ConfigMap. In addition to managing the CRUD (Create, Read, Update, Delete) of Tyk OAS API resources, the Tyk Operator helps you better manage resources through object linking to Ingress, Security Policies, and certificates stored as Kubernetes secrets. See below for a list of Operator features and examples: + +| Features | Support | Supported From | Comments | Example | +| :---------- | :--------- | :----------------- | :---------- | :-------- | +| API Category | βœ… | v1.0 | - | [Manage API Categories](#api-categories) | +| API Version | βœ… | v1.0 | - | [Manage API versioning](#api-versioning) | +| API Ownership via OperatorContext | βœ… | v1.0 | - | [API Ownership](/api-management/user-management#when-to-use-api-ownership) | +| Client Certificates | βœ… | v1.0 | - | [Manage TLS certificate](#tls-certificates) | +| Custom Domain Certificates | βœ… | v1.0 | - | [Manage TLS certificate](#tls-certificates) | +| Public keys pinning | βœ… | v1.0 | - | [Manage TLS certificate](#tls-certificates) | +| Upstream mTLS | βœ… | v1.0 | - | [Manage TLS certificate](#tls-certificates) | +| Kubernetes Ingress | βœ… | v1.0 | - | [Kubernetes Ingress Controller](/product-stack/tyk-operator/tyk-ingress-controller) | +| Link with SecurityPolicy | βœ… | v1.0 | - | [Protect an API](/tyk-stack/tyk-operator/create-an-api#add-a-security-policy-to-your-api) | + +### TykStreamsApiDefinition CRD +The TykStreamsApiDefinition Custom Resource Definition (CRD) manages [Async API configuration](/api-management/event-driven-apis#configuration-options) within a Kubernetes environment. + +#### TykStreamsApiDefinition Features + +`TykStreamsApiDefinition` can support all features of [Tyk Streams](/api-management/event-driven-apis#). You just need to provide the Tyk Streams API definition via a ConfigMap. 
In addition to managing the CRUD (Create, Read, Update, Delete) of Tyk Streams API resources, the Tyk Operator helps you better manage resources through object linking to Security Policies. See below for a list of Operator features and examples:
+
+| Features | Support | Supported From | Comments | Example |
+| :---------- | :--------- | :----------------- | :---------- | :-------- |
+| API Ownership via OperatorContext | βœ… | v1.0 | - | [API Ownership](/api-management/user-management#when-to-use-api-ownership) |
+| Link with SecurityPolicy | βœ… | v1.0 | - | [Protect an API](/tyk-stack/tyk-operator/create-an-api#add-a-security-policy-to-your-api) |
+
+### Version Compatibility
+Ensuring compatibility between different versions is crucial for maintaining stable and efficient operations. This document provides a comprehensive compatibility matrix for Tyk Operator with various versions of Tyk and Kubernetes. By understanding these compatibility details, you can make informed decisions about which versions to deploy in your environment, ensuring that you leverage the latest features and maintain backward compatibility where necessary.
+
+#### Compatibility with Tyk
+Tyk Operator can work with all versions of Tyk beyond Tyk 3.x+. Since Tyk is backward compatible, you can safely use the
+latest version of Tyk Operator to work with any version of Tyk.
+However, if you're using a feature that was not yet available on an earlier version of Tyk, e.g. defining a Subgraph with Tyk 3.x, you'll see errors in Tyk Operator controller manager logs.
+
+See [Release notes](/developer-support/release-notes/operator) to check for each Tyk Operator release,
+which version of Tyk it is tested against.
+ +| Tyk Version | 3.2 | 4.0 | 4.1 | 4.2 | 4.3 | 5.0 | 5.2 | 5.3 | 5.4 | 5.5 | 5.6 | 5.7 | +| :-------------------- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | +| Tyk Operator v0.13 | Y | | | | Y | | | | | | | | +| Tyk Operator v0.14 | Y | Y | | | Y | Y | | | | | | | +| Tyk Operator v0.14.1 | Y | Y | | | Y | Y | | | | | | | +| Tyk Operator v0.15.0 | Y | Y | | | Y | Y | | | | | | | +| Tyk Operator v0.15.1 | Y | Y | | | Y | Y | | | | | | | +| Tyk Operator v0.16.0 | Y | Y | | | Y | Y | Y | | | | | | +| Tyk Operator v0.17.0 | Y | Y | | | Y | Y | Y | Y | | | | | +| Tyk Operator v0.17.1 | Y | Y | | | | Y | Y | Y | | | | | +| Tyk Operator v0.18.0 | Y | Y | | | | Y | Y | Y | Y | | | | +| Tyk Operator v1.0.0 | Y | Y | | | | Y | | Y | | Y | Y | | +| Tyk Operator v1.1.0 | Y | Y | | | | Y | | Y | | Y | Y | Y | + +#### Compatibility with Kubernetes Version + +See [Release notes](https://github.com/TykTechnologies/tyk-operator/releases) to check for each Tyk Operator release, +which version of Kubernetes it is tested against. 
+ +| Kubernetes Version | 1.19 | 1.20 | 1.21 | 1.22 | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | 1.30 | +| :-------------------- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | :---- | +| Tyk Operator v0.13 | Y | Y | Y | Y | Y | Y | Y | | | | | | +| Tyk Operator v0.14 | Y | Y | Y | Y | Y | Y | Y | | | | | | +| Tyk Operator v0.14.1 | | Y | Y | Y | Y | Y | Y | Y | | | | | +| Tyk Operator v0.15.0 | | Y | Y | Y | Y | Y | Y | Y | | | | | +| Tyk Operator v0.15.1 | | Y | Y | Y | Y | Y | Y | Y | | | | | +| Tyk Operator v0.16.0 | | Y | Y | Y | Y | Y | Y | Y | | | | | +| Tyk Operator v0.17.0 | | | | | | | Y | Y | Y | Y | Y | | +| Tyk Operator v0.17.1 | | | | | | | Y | Y | Y | Y | Y | | +| Tyk Operator v0.18.0 | | | | | | | Y | Y | Y | Y | Y | | +| Tyk Operator v1.0.0 | | | | | | | Y | Y | Y | Y | Y | Y | +| Tyk Operator v1.1.0 | | | | | | | Y | Y | Y | Y | Y | Y | + + +### Security Policy CRD +The SecurityPolicy custom resource defines configuration of [Tyk Security Policy object](/api-management/policies). 
+ +Here are the supported features: + +| Features | Support | Supported From | Example | +| :-------------------------------- | :----------- | :---------------- | :--------- | +| API Access | βœ… | v0.1 | [API Access](/tyk-stack/tyk-operator/create-an-api#define-the-security-policy-manifest) | +| Rate Limit, Throttling, Quotas | βœ… | v0.1 | [Rate Limit, Throttling, Quotas](/tyk-stack/tyk-operator/create-an-api#define-the-security-policy-manifest) | +| Meta Data & Tags | βœ… | v0.1 | [Tags and Meta-data](/tyk-stack/tyk-operator/create-an-api#define-the-security-policy-manifest) | +| Path and Method based permissions | βœ… | v0.1 | [Path based permission](/tyk-stack/tyk-operator/create-an-api#security-policy-example) | +| Partitions | βœ… | v0.1 | [Partitioned policies](/tyk-stack/tyk-operator/create-an-api#security-policy-example) | +| Per API limit | βœ… | v1.0 | [Per API Limit](/tyk-stack/tyk-operator/create-an-api#security-policy-example) | +| Per-Endpoint limit | βœ… | v1.0 | [Per Endpoint Limit](/tyk-stack/tyk-operator/create-an-api#security-policy-example) | + +## Manage API MetaData + + +### API Name + +#### Tyk OAS API and Tyk Streams API + +API name can be set through `x-tyk-api-gateway.info.name` field in [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas) object. + +#### Tyk Classic API + +To set the name of an API in the `ApiDefinition`, use the `spec.name` string field. This name is displayed on the Tyk Dashboard and should concisely describe what the API represents. 
+ +Example: + +```yaml {linenos=true, linenostart=1, hl_lines=["6-6"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: example-api # This is the metadata name of the Kubernetes resource +spec: + name: Example API # This is the "API NAME" in Tyk + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://example.com + listen_path: /example + strip_listen_path: true +``` + +### API Status + +#### API Active Status + +An active API will be loaded to the Gateway, while an inactive API will not, resulting in a 404 response when called. + +#### Tyk OAS API and Tyk Streams API + +API active state can be set through `x-tyk-api-gateway.info.state.active` field in [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas) object. + +#### Tyk Classic API + +The active status of an API can be set by modifying the `spec.active` configuration parameter. When set to `true`, this enables the API so that Tyk will listen for and process requests made to the `listenPath`. + +```yaml {linenos=true, linenostart=1, hl_lines=["9-9"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: inactive-api +spec: + name: Inactive API + use_keyless: true + protocol: http + active: false + proxy: + target_url: http://inactive.example.com + listen_path: /inactive + strip_listen_path: true +``` + +### API Accessibility + +An API can be configured as internal so that external requests are not processed. + +#### Tyk OAS API and Tyk Streams API + +API accessibility can be set through `x-tyk-api-gateway.info.state.internal` field in [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas) object. + +#### Tyk Classic API + +API accessibility can be set through the `spec.internal` configuration parameter as shown in the example below. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["10-10"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: inactive-api +spec: + name: Inactive API + use_keyless: true + protocol: http + active: true + internal: true + proxy: + target_url: http://inactive.example.com + listen_path: /inactive + strip_listen_path: true +``` + +### API ID + +#### Creating a new API + +If you're creating a new API using Tyk Operator, you don't need to specify the ID. The API ID will be generated in a deterministic way. + +#### Tyk OAS API and Tyk Streams API + +The generated ID is stored in `status.id` field. Run the following command to inspect generated API ID of a Tyk OAS API. + +```bash +% kubectl get tykoasapidefinition [API_NAME] --namespace [NAMESPACE] -o jsonpath='{.status.id}' +ZGVmYXVsdC9wZXRzdG9yZQ +``` + +In this example, the generated API ID is `ZGVmYXVsdC9wZXRzdG9yZQ`. + +#### Tyk Classic API + +The generated ID is stored in `status.api_id` field. Run the following command to inspect generated API ID of a Tyk Classic API. + +```bash +% kubectl get apidefinition [API_NAME] --namespace [NAMESPACE] -o jsonpath='{.status.api_id}' +ZGVmYXVsdC90ZXN0 +``` + +In this example, the generated API ID is `ZGVmYXVsdC90ZXN0`. + +### Updating an existing API + +#### Tyk OAS API and Tyk Streams API + +If you already have API configurations created in the Tyk Dashboard and want to start using Tyk Operator to manage these APIs, you can include the existing API ID in the manifest under the `x-tyk-api-gateway.info.id` field in [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas) object. + +#### Tyk Classic API + +If you already have API configurations created in the Tyk Dashboard and want to start using Tyk Operator to manage these APIs, you can include the existing API ID in the manifest under the `spec.api_id` field. This way, when you apply the manifest, Tyk Operator will not create a new API in the Dashboard. 
Instead, it will update the original API with the Kubernetes spec. + +Example + +```yaml {linenos=true, linenostart=1, hl_lines=["8-8"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: existing-api + namespace: default +spec: + name: Existing API + api_id: 12345 + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://existing.example.com + listen_path: /existing + strip_listen_path: true +``` + +In this example, the API with ID `12345` will be updated according to the provided spec instead of creating a new API. + + +### API Categories +[API categories](/api-management/dashboard-configuration#governance-using-api-categories) are configured differently for Tyk OAS APIs and Tyk Classic APIs. Please see below for examples. + +#### Tyk OAS API + +API categories can be specified through `categories` field in `TykOasApiDefinition` CRD. + +Here's an example: + +```yaml {linenos=true, linenostart=1, hl_lines=["7-9"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: oas-api-with-categories + namespace: tyk +spec: + categories: + - category 1 + - category 2 + tykOAS: + configmapRef: + keyName: oas-api-definition.json + name: tyk-oas-api-config + namespace: tyk +``` + +#### Tyk Streams API + +As of Tyk Operator v1.1, API categories is not supported in `TykStreamsApiDefinition` CRD. + +#### Tyk Classic API + +For a Tyk Classic API, you can specify the category name using the `name` field with a `#` qualifier. This will categorize the API in the Tyk Dashboard. See [How API categories work](/api-management/dashboard-configuration#tyk-classic-apis) to learn about limitations on API names. 
+ +Example + +```yaml {linenos=true, linenostart=1, hl_lines=["6-6"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: categorized-api +spec: + name: "my-classic-api #global #staging" + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://categorized.example.com + listen_path: /categorized + strip_listen_path: true +``` + +### API Versioning +[API versioning](/api-management/api-versioning) are configured differently for [Tyk OAS APIs](#tyk-oas-api) and [Tyk Classic APIs](#tyk-classic-api). Please see below for examples. + +#### Configuring API Version in Tyk OAS API Definition + +In the [Tyk OAS API Definition](/api-management/api-versioning), versioning can be configured via `x-tyk-api-gateway.versioning` object of the Base API, where the child API's IDs are specified. In the Kubernetes environment with Tyk Operator, where we reference API resources through its Kubernetes name and namespace, this is not desired. Therefore, we add support for versioning configurations through the field `versioning` in `TykOasApiDefinition` custom resource definition (CRD). 
+ +Here's an example: + +```yaml{linenos=true, linenostart=1, hl_lines=["12-24"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: order-api + namespace: default +spec: + tykOAS: + configmapRef: + namespace: default + name: order-api + keyName: order-api-definition-v1.json + versioning: + enabled: true + location: header + key: x-api-version + name: v1 + default: v1 + fallbackToDefault: true + stripVersioningData: true + versions: + - name: v2 + tykOasApiDefinitionRef: + name: order-api-v2 + namespace: default +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: order-api-v2 + namespace: default +spec: + tykOAS: + configmapRef: + namespace: default + name: order-api-v2 + keyName: order-api-definition-v2.json +``` + +In this example, two different versions of an API are defined: `order-api` (v1) and `order-api-v2` (v2). + +`versioning` is configured at `order-api` (v1), the Base API, and it has similiar structure as [Tyk OAS API Definition](/api-management/api-versioning): + +- `versioning`: This object configures API versioning for the `order-api`. + - `enabled`: Set to true to enable versioning. + - `name`: an identifier for this version of the API (v1). + - `default`: Specifies the default version (v1), which will be used if no version is specified in the request. + - `location`: Specifies where the version key is expected (in this case, in the header). It can be set to `header` or `url-param`. + - `key`: Specifies the versioning identifier key (`x-api-version`) to identify the version. In this example, the version is determined by an HTTP header named `x-api-version`. + - `fallbackToDefault`: When set to true, if an unspecified or invalid version is requested, the default version (v1) will be used. + - `stripVersioningData`: When true, removes versioning identifier (like headers or query parameters) from the upstream request to avoid exposing internal versioning details. 
+ - `urlVersioningPattern`: Specifies a regex that matches the format that you use for the versioning identifier (name) if you are using stripVersioningData and fallBackToDefault with location=url with Tyk 5.5.0 or later + - `versions`: Defines the list of API versions available: + - `name`: an identifier for this version of the API (v2). + - `tykOasApiDefinitionRef`: Refers to a separate TykOasApiDefinition resource that represent a new API version. + - `name`: Kubernetes metadata name of the resource (`order-api-v2`). + - `namespace`: Kubernetes metadata namespace of the resource (`default`). + +With Tyk Operator, you can easily associate different versions of your APIs using their Kubernetes names. This eliminates the need to include versioning information directly within the base API's definition (`x-tyk-api-gateway.versioning` object), which typically requires referencing specific API IDs. Instead, the Operator allows you to manage versioning declaratively in the `TykOasApiDefinition` CRD, using the `versioning` field to specify versions and their Kubernetes references (names and namespaces). + +When using the CRD for versioning configuration, you don't have to worry about knowing or managing the unique API IDs within Tyk. The Tyk Operator handles the actual API definition configuration behind the scenes, reducing the complexity of version management. + +In case if there is original versioning information in the base API Definition, the versioning information will be kept and be merged with what is specified in CRD. If there are conflicts between the Tyk OAS API Definition and CRD, we will make use of CRD values as the final configuration. + +Tyk Operator would also protect you from accidentally deleting a version of an API that is being referenced by another API, maintaining your API integrity. + +#### Configuring API Version in Tyk Streams API Definition + +As of Tyk Operator v1.1, API versioning is not supported in `TykStreamsApiDefinition` CRD. 
This can be configured natively in the Tyk Streams API Definition. + +#### Configuring API Version in Tyk Classic API Definition + +For Tyk Classic API, versioning can be configured via `ApiDefinition` custom resource definition (CRD). See [Tyk Classic versioning](/api-management/gateway-config-tyk-classic#tyk-classic-api-versioning) for a comprehensive example of configuring API versioning for Tyk Classic API with Tyk Operator. + +### API Ownership + +Please consult the [API Ownership](/api-management/user-management#api-ownership) documentation for the fundamental concepts of API Ownership in Tyk and [Operator Context](/api-management/automations/operator#multi-tenancy-in-tyk) documentation for an overview of the use of OperatorContext to manage resources for different teams effectively. + +The guide includes practical examples for managing API ownership via OperatorContext. Key topics include defining user owners and user group owners in OperatorContext for connecting and authenticating with a Tyk Dashboard, and using `contextRef` in `TykOasApiDefinition` or `ApiDefinition` objects to ensure configurations are applied within specific organizations. The provided YAML examples illustrate how to set up these configurations. + +#### How API Ownership works in Tyk Operator + +In Tyk Dashboard, API Ownership ensures that only designated 'users' who own an API can modify it. This security model is crucial for maintaining control over API configurations, especially in a multi-tenant environment where multiple teams or departments may have different responsibilities and permissions. + +Tyk Operator is designed to interact with Tyk Dashboard as a system user. For the Tyk Dashboard, Tyk Operator is just another user that must adhere to the same access controls and permissions as any other user. This means: + +- Tyk Operator needs the correct access rights to modify any APIs. +- It must be capable of managing APIs according to the ownership rules set in Tyk Dashboard. 
+ +To facilitate API ownership and ensure secure operations, Tyk Operator must be able to 'impersonate' different users for API operations. This is where `OperatorContext` comes into play. Users can define different `OperatorContext` objects that act as different agents to connect to Tyk Dashboard. Each `OperatorContext` can specify different access parameters, including the user access key and organization it belongs to. Within `OperatorContext`, users can specify the IDs of owner users or owner user groups. All APIs managed through that `OperatorContext` will be owned by the specified users and user groups, ensuring compliance with Tyk Dashboard's API ownership model. + +Enabling API ownership with OperatorContext + +#### OperatorContext + +Here's how `OperatorContext` allows Tyk Operator to manage APIs under different ownerships: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: OperatorContext +metadata: + name: team-alpha + namespace: default +spec: + env: + # The mode of the admin api + # ce - community edition (open source gateway) + # pro - dashboard (requires a license) + mode: pro + # Org ID to use + org: *YOUR_ORGANIZATION_ID* + # The authorization token this will be set in x-tyk-authorization header on the + # client while talking to the admin api + auth: *YOUR_API_ACCESS_KEY* + # The url to the Tyk Dashboard API + url: http://dashboard.tyk.svc.cluster.local:3000 + # Set this to true if you want to skip tls certificate and host name verification + # this should only be used in testing + insecureSkipVerify: true + # For ingress the operator creates and manages ApiDefinition resources, use this to configure + # which ports the ApiDefinition resources managed by the ingress controller binds to. + # Use this to override default ingress http and https port + ingress: + httpPort: 8000 + httpsPort: 8443 + # Optional - The list of users who are authorized to update/delete the API. + # The user pointed by auth needs to be in this list, if not empty. 
+ user_owners: + - a1b2c3d4e5f6 + # Optional - The list of groups of users who are authorized to update/delete the API. + # The user pointed by auth needs to be a member of one of the groups in this list, if not empty. + user_group_owners: + - 1a2b3c4d5e6f +``` + +#### Tyk OAS API and Tyk Streams API + +Once an `OperatorContext` is defined, you can reference it in your Tyk OAS or Tyk Streams API Definition objects using `contextRef`. Below is an example with TykOasApiDefinition: + +```yaml {hl_lines=["40-43"],linenos=true} +apiVersion: v1 +data: + test_oas.json: |- + { + "info": { + "title": "Petstore", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } + } +kind: ConfigMap +metadata: + name: cm + namespace: default +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: petstore +spec: + contextRef: + name: team-alpha + namespace: default + tykOAS: + configmapRef: + name: cm + namespace: default + keyName: test_oas.json +``` + +In this example, the `TykOasApiDefinition` object references the `team-alpha` context, ensuring that it is managed under the ownership of the specified users and user groups. + +#### Tyk Classic API + +Similarly, if you are using Tyk Classic API, you can reference it in your API Definition objects using `contextRef`. 
Below is an example: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin + namespace: alpha +spec: + contextRef: + name: team-alpha + namespace: default + name: httpbin + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +In this example, the `ApiDefinition` object references the `team-alpha` context, ensuring that it is managed under the ownership of the specified users and user groups. + +## Troubleshooting and FAQ + + + +While Tyk Operator is designed to work within a Kubernetes environment, you can still use it to manage non-Kubernetes Tyk installations. You'll need to: + +1. Run Tyk Operator in a Kubernetes cluster. +2. Configure Tyk Operator to point to your external Tyk installation, e.g. via `tyk-operator-conf`, environment variable, or OperatorContext: +```yaml + TYK_MODE: pro + TYK_URL: http://external-tyk-dashboard + TYK_AUTH: api-access-key + TYK_ORG: org-id +``` + +This allows you to manage your external Tyk installation using Kubernetes resources. + + + +From [Tyk Operator v0.15.0](https://github.com/TykTechnologies/tyk-operator/releases/tag/v0.15.0), we introduce a new status [subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#subresources) in APIDefinition CRD, called _latestTransaction_ which holds information about reconciliation status. + +> The [Status subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource) in Kubernetes is a specialized endpoint that allows developers and operators to retrieve the real-time status of a specific Kubernetes resource. 
By querying this subresource, users can efficiently access essential information about a resource's current state, conditions, and other relevant details without fetching the entire resource, simplifying monitoring and aiding in prompt decision-making and issue resolution. + +The new status subresource _latestTransaction_ consists of a couple of fields that show the latest result of the reconciliation: +- `.status.latestTransaction.status`: shows the status of the latest reconciliation, either Successful or Failed; +- `.status.latestTransaction.time`: shows the time of the latest reconciliation; +- `.status.latestTransaction.error`: shows the message of an error if observed in the latest transaction. + +**Example: Find out why an APIDefinition resource cannot be deleted** + +Consider the scenario when APIDefinition and SecurityPolicy are connected. Usually, APIDefinition cannot be deleted directly since it is protected by SecurityPolicy. The proper approach to remove an APIDefinition is to first remove the reference to the SecurityPolicy (either by deleting the SecurityPolicy CR or updating SecurityPolicy CR’s specification), and then remove the APIDefinition itself. However, if we directly delete this APIDefinition, Tyk Operator won’t delete the APIDefinition unless the link between SecurityPolicy and APIDefinition is removed. It is to protect the referential integrity between your resources. + +```console +$ kubectl delete tykapis httpbin +apidefinition.tyk.tyk.io "httpbin" deleted +^C% +``` + +After deleting APIDefinition, the operation hangs, and we suspect that something is wrong. +Users might still look through the logs to comprehend the issue, as they did in the past, but they can now examine their APIDefinition’s status subresource to make their initial, speedy issue diagnosis. 
+ +```console +$ kubectl get tykapis httpbin +NAME DOMAIN LISTENPATH PROXY.TARGETURL ENABLED STATUS +httpbin /httpbin http://httpbin.org true Failed +``` +As seen in the STATUS column, something went wrong, and the STATUS is Failed. + +To get more information about the APIDefinition resource, we can use `kubectl describe` or `kubectl get`: +```console +$ kubectl describe tykapis httpbin +Name: httpbin +Namespace: default +API Version: tyk.tyk.io/v1alpha1 +Kind: ApiDefinition +Metadata: + ... +Spec: + ... +Status: + api_id: ZGVmYXVsdC9odHRwYmlu + Latest CRD Spec Hash: 9169537376206027578 + Latest Transaction: + Error: unable to delete api due to security policy dependency=default/httpbin + Status: Failed + Time: 2023-07-18T07:26:45Z + Latest Tyk Spec Hash: 14558493065514264307 + linked_by_policies: + Name: httpbin + Namespace: default +``` +or +```console +$ kubectl get tykapis httpbin -o json | jq .status.latestTransaction +{ + "error": "unable to delete api due to security policy dependency=default/httpbin", + "status": "Failed", + "time": "2023-07-18T07:26:45Z" +} +``` +Instead of digging into Tyk Operator's logs, we can now diagnose this issue simply by looking at the `.status.latestTransaction` field. As `.status.latestTransaction.error` implies, the error is related to *SecurityPolicy* dependency. + + + +Yes, you can use Tyk Operator to manage multiple Tyk installations. 
You'll need to create separate `OperatorContext` resources for each installation: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: OperatorContext +metadata: + name: prod-context +spec: + env: + TYK_MODE: pro + TYK_URL: http://tyk-dashboard-production + TYK_AUTH: prod-secret +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: OperatorContext +metadata: + name: staging-context +spec: + env: + TYK_MODE: pro + TYK_URL: http://tyk-dashboard-staging + TYK_AUTH: staging-secret +``` + +Then, you can specify which context to use in your API and Policy resources: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: my-api +spec: + name: My API + contextRef: + name: prod-context + # ... other API configuration +``` + + diff --git a/api-management/automations/sync.mdx b/api-management/automations/sync.mdx new file mode 100644 index 000000000..cc89c2c16 --- /dev/null +++ b/api-management/automations/sync.mdx @@ -0,0 +1,155 @@ +--- +title: "Tyk Sync - Synchronize Tyk Environment With GitHub" +description: "How to synchronize Tyk configuration with Github using Tyk Sync" +keywords: "Tyk API Management, Tyk Sync, Tyk Operator, Github, Kubernetes, Automations" +sidebarTitle: "Overview" +--- + +## Introduction + +Tyk Sync enables you to export and import Tyk configurations directly from Git, keeping environments aligned without manual configuration updates. This section covers the setup and use of Tyk Sync, providing steps to ensure consistent configurations across different environments. + + +## Tyk Sync Features +Tyk Sync works with *Tyk Dashboard* installation. With Tyk Dashboard, Tyk Sync supports managing API definitions, security policies, and API templates. + +| Tyk Sync Feature                                                              | Tyk Dashboard (Licensed)    | +| :--------------------------------------------------------------------------- | :-------------------------- | +|

Backup objects from Tyk to a directory

If you want to backup your API definitions, policies and templates in Tyk, you can use the `dump` command. It allows you to save the objects in transportable files. You can use this command to backup important API configurations before upgrading Tyk, or to save API configurations from one Dashboard instance and then use `update`, `publish`, or `sync` commands to update the API configurations to another Dashboard instance. | βœ… | +|

Synchronise objects from Git (or any VCS) to Tyk

To implement GitOps for API management, store your API definitions, policies and templates in Git or any version control system. Use the `sync` command to synchronise those objects to Tyk. During this operation, Tyk Sync will delete any objects in the Dashboard that cannot be found in the VCS, and update those that can be found and create those that are missing. | βœ… | +|

Update objects

The `update` command will read from VCS or file system and will attempt to identify matching API definitions, policies and templates in the target Dashboard, and update them. Unmatched objects will not be created. | βœ… | +|

Publish objects

The `publish` command will read from VCS or file system and create API definitions, policies, and templates in target Dashboard. This will not update any existing objects. If it detects a collision, the command will stop. | βœ… | +|

Show and import Tyk examples

The `examples` command allow you to show and import [Tyk examples](https://github.com/TykTechnologies/tyk-examples). An easy way to load up your Tyk installation with some interesting examples!| βœ… | + +### Working with OAS APIs + +Starting with Sync v1.5+ and Dashboard v5.3.2+, Tyk Sync supports both [Tyk OAS APIs](/api-management/gateway-config-tyk-oas) and [Tyk Classic APIs](/api-management/gateway-config-tyk-classic) when working with the Tyk Dashboard, without requiring special flags or configurations. + +For Sync versions v1.4.1 to v1.4.3, enabling Tyk Sync for Tyk OAS APIs requires the [allow-unsafe-oas](/tyk-dashboard/configuration#allow_unsafe_oas) configuration in the Dashboard, along with the `--allow-unsafe-oas` flag when invoking Tyk Sync. Note that Tyk Sync versions v1.4.1 to 1.4.3 do not support API Category for Tyk OAS APIs. + +### Working with Tyk Streams APIs + +Tyk Streams API support was introduced in Tyk Dashboard v5.7.0. Tyk Sync v2.0 and later is compatible with Tyk Streams APIs and manages them similarly to Tyk OAS APIs. With Tyk Sync, you can seamlessly sync, publish, update, and dump Tyk Streams APIs just like OAS APIs. + +Note: The Streams API validator is not applied during these operations. + +### Working with Open Source Gateway + +From Sync v2.0, compatibility with the Open Source Tyk Gateway has been removed, making Tyk Sync v2.0 compatible exclusively with licensed Tyk Dashboard. As a result, Tyk Sync is no longer usable with the Open Source (OSS) version of the Tyk Gateway. + +## Installation + +To install Tyk Sync, follow the instructions in the [Tyk Sync installation guide](/product-stack/tyk-sync/installing-tyk-sync). You can install Tyk Sync using Docker or download the binary directly. + +## Quick Start Guide + +For a quick start guide to using Tyk Sync, refer to the [Tyk Sync Quick Start Guide](/api-management/sync/quick-start). 
This guide will help you set up Tyk Sync, dump API configurations, and synchronize them with your Tyk Dashboard. + +## Glossary + +### Tyk Sync +A command line tool and library designed to manage and synchronize Tyk API Gateway configurations with version control systems. Originally called "tyk-git," it was renamed to "tyk-sync" as its capabilities expanded beyond Git to support synchronization with any file system. + +### Synchronization +The process of ensuring that API and policy configurations in your Tyk Gateway match those stored in your version control system. Tyk Sync performs one-way synchronization, where definitions are written from the VCS to the Tyk Dashboard. + +### Spec File (.tyk.json) +A metadata file created by Tyk Sync that contains information about the APIs and policies in a directory. This file is used during synchronization to determine what needs to be created, updated, or deleted. + +## FAQ + + + +Tyk Sync is designed to dump API configurations from a Tyk Dashboard, not directly from a Tyk Gateway. + +Tyk Sync's `dump` command is specifically designed to work with the Tyk Dashboard. The command requires a dashboard URL and API secret: + +```bash +tyk-sync dump -d="http://dashboard-url" -s="dashboard-secret" -t="./output-directory" +``` + +There is no equivalent flag or functionality to dump configurations directly from a standalone Gateway. This is because: + +1. The Dashboard serves as the central configuration repository in the Tyk architecture +2. The Gateway is primarily focused on runtime execution of those configurations +3. 
While Gateways can operate standalone, they don't expose the same management APIs as the Dashboard + + + +The three commands in Tyk Sync have distinct purposes and behaviors when managing API configurations: + +**sync** +- **Purpose**: Comprehensive synchronization from a source (Git repo or file system) to Tyk Dashboard +- **Behavior**: + - Creates new APIs, policies, and assets that exist in the source but not in the Dashboard + - Updates existing APIs, policies, and assets that exist in both places + - Deletes APIs, policies, and assets that exist in the Dashboard but not in the source (unless `--no-delete` flag is used) +- **Use case**: When you want to make the Dashboard exactly match your source repository + +**publish** +- **Purpose**: Only adds new API configurations to Tyk Dashboard +- **Behavior**: + - Creates new APIs, policies, and assets that don't already exist in the Dashboard + - Will not update existing items + - Stops if it detects a collision (an API that already exists) + - Will not delete anything +- **Use case**: When you want to add new APIs without affecting existing ones + +**update** +- **Purpose**: Only updates existing API configurations in Tyk Dashboard +- **Behavior**: + - Updates APIs, policies, and assets that already exist in the Dashboard + - Will not create new items + - Will not delete anything +- **Use case**: When you want to update existing APIs without adding new ones or removing any + +In summary, "sync" is the most comprehensive operation (create + update + delete), "publish" only creates new items, and "update" only modifies existing items. + + + +Tyk Sync allows you to dump configurations to a local directory, which can then be committed to a Git repository. This enables version control and easy synchronization across environments. + +For example: +1. Dump configurations: `tyk-sync dump -d http://dashboard:3000 -s secret -t ./configs` +2. Commit to Git: + ``` + cd configs + git add . 
+ git commit -m "Update Tyk configurations" + git push + ``` + + + +Yes, you can store multiple API definitions, policies, and other Tyk resources in a single Git repository. Tyk Sync and Tyk Operator can work with multiple resources in the same directory. + +Your repository structure might look like this: +``` +tyk-configs/ +β”œβ”€β”€ apis/ +β”‚ β”œβ”€β”€ api1.yaml +β”‚ └── api2.yaml +β”œβ”€β”€ policies/ +β”‚ β”œβ”€β”€ policy1.yaml +β”‚ └── policy2.yaml +└── tyk-operator/ + └── operator-context.yaml +``` + + + +To roll back changes made with Tyk Sync: + +1. If you're using Git, check out the previous version of your configurations: + ```bash + git checkout <commit-hash> + ``` + +2. Use Tyk Sync to publish the previous version: + ```bash + tyk-sync sync -d http://dashboard:3000 -s <your-dashboard-secret> -p ./ + ``` + +It's a good practice to maintain separate branches or tags for different environments to make rollbacks easier. + + diff --git a/api-management/batch-processing.mdx b/api-management/batch-processing.mdx new file mode 100644 index 000000000..5c5197931 --- /dev/null +++ b/api-management/batch-processing.mdx @@ -0,0 +1,234 @@ +--- +title: "Batch Processing" +description: "Make multiple API requests in a single HTTP call using Batch Requests" +keywords: "Request Optimization, Optimization, Batched Requests, Batch, Batch Processing" +sidebarTitle: "Batch Processing" +--- + +## Overview + +Batch Requests is a powerful Tyk Gateway feature that allows clients to make multiple requests to an API in a single HTTP call. Instead of sending numerous individual requests to the API, clients can bundle these requests together, reducing network overhead and improving performance. + +### What are Batch Requests? + +Batch Requests act as an aggregator for multiple API calls.
When a client sends a batch request to Tyk, the Gateway processes each request in the batch individually (applying all relevant middleware, authentication, and rate limiting) and returns a combined response containing the results of all requests. The scope of a batch request is limited to a single API deployed on Tyk, though can comprise requests to different endpoints (method and path) defined for that API. + +### Key Benefits + +- Reduced Network Overhead: Minimize the number of HTTP connections required for multiple related API operations +- Improved Client Performance: Decrease latency by eliminating multiple round-trips to the server +- Simplified Error Handling: Process success and failure responses for multiple operations in a single place +- Maintained Security: Each individual request within a batch still goes through Tyk's full security pipeline +- Flexible Execution: Choose between parallel or sequential execution of requests + +### When to Use Batch Requests + +Batch Requests are ideal for scenarios such as: + +- Mobile applications that need to fetch data from multiple endpoints during startup +- Dashboard applications that need to populate multiple widgets with different API data +- Complex workflows that require data from several API endpoints to complete a single user action +- Integration scenarios where you need to synchronize operations across multiple services + +### How Batch Requests Work + +When Tyk receives a batch request, it: + +- Validates the batch request format +- Processes each request in the batch individually (applying all middleware, authentication, and quotas) +- Collects all responses +- Returns a single combined response to the client + +This process ensures that security is maintained while providing the performance benefits of batching. 
+ +## Using Batch Requests + +### Configuration + +Batch Requests are disabled by default, so you need to enable batch request support in your API definition by setting `server.batchProcessing.enabled` in the Tyk Vendor Extension (Tyk Classic: `enable_batch_request_support`). + +### Batch Request Endpoint + +When batch requests are enabled, Tyk automatically creates an additional logical endpoint on the subrouter for the API. This won't appear in the API definition and so will not be added to the OpenAPI description. This `/tyk/batch/` endpoint accepts requests in a specific "batch" format and processes them as described in the next section. + +For example, if your API's listen path is `/myapi/` the batch request endpoint would be `/myapi/tyk/batch/`. + +Note that the trailing slash `/` at the end of the URL is required when calling this endpoint. + +### Batch Request Format + +Batch requests must be sent as HTTP `POST` requests with a JSON payload that follows this structure: + +```json +{ + "requests": [ + { + "method": "GET", + "headers": { + "x-header-1": "value-1", + "authorization": "your-auth-token" + }, + "body": "", + "relative_url": "resource/123" + }, + { + "method": "POST", + "headers": { + "x-header-2": "value-2", + "authorization": "your-auth-token" + }, + "body": "{\"property\": \"value\"}", + "relative_url": "resource/create" + }, + { + "method": "GET", + "headers": { + "x-header-3": "value-3", + "authorization": "your-auth-token" + }, + "body": "", + "relative_url": "resource/invalid" + } + ], + "suppress_parallel_execution": false +} +``` + +Where: + +- `requests`: An array of individual requests to be processed + - `method`: The HTTP method for the individual request (`GET`, `POST`, `PUT`, `DELETE`, etc.) + - `headers`: Any HTTP headers to include with the request + - `body`: The request body (for `POST`, `PUT` requests) in the format prescribed by the API (e.g. 
JSON string) + - `relative_url`: The endpoint for the request, which can include query parameters +- `suppress_parallel_execution`: A boolean flag to control whether requests should be processed in parallel (`false`) or sequentially in the order that they appear in the array (`true`) + +In the example above, on receipt of a request to `POST /my-api/tyk/batch/` with this payload, Tyk would process three requests in parallel: + +- `GET /my-api/resource/123` passing `x-header-1` and `Authorization` headers +- `POST /my-api/resource/create` passing `x-header-2` and `Authorization` headers and the payload described in `body` +- `GET /my-api/resource/invalid` passing `x-header-3` and `Authorization` headers + +### Execution Order + +Tyk will work through the requests in the batch in the order that they are declared in the `requests` array. The `suppress_parallel_execution` setting is used to determine whether Tyk should wait for each request to complete before starting the next (`true`), or if it should issue all of the requests in parallel (`false`). + +If sequential execution is in use, Tyk will work through the entire `requests` array regardless of whether any requests return errors. All responses (success and failure) will be logged and returned to the client as described [below](/api-management/batch-processing#batch-response-format). + +### Batch Response Format + +When you send a batch request to Tyk, each individual request within the batch is processed independently. This means that some requests in a batch may succeed while others fail. Tyk provides detailed response information for each request in the batch to help you identify and handle errors appropriately.
+ +The response from a batch request is an array of response objects, each corresponding to one of the requests in the batch in the order that they appeared in the `requests` array: + +```json +[ + { + "relative_url": "resource/123", + "code": 200, + "headers": { + "Content-Type": ["application/json"], + "Date": ["Wed, 15 Mar 2023 12:34:56 GMT"] + }, + "body": "{\"id\":\"123\",\"name\":\"Example Resource\"}" + }, + { + "relative_url": "resource/create", + "code": 201, + "headers": { + "Content-Type": ["application/json"], + "Date": ["Wed, 15 Mar 2023 12:34:56 GMT"] + }, + "body": "{\"id\":\"456\",\"name\":\"New Resource\",\"status\":\"created\"}" + }, + { + "relative_url": "resource/invalid", + "code": 404, + "headers": { + "Content-Type": ["application/json"], + "Date": ["Wed, 15 Mar 2023 12:34:56 GMT"] + }, + "body": "{\"error\":\"Resource not found\"}" + } +] +``` + +Each response object contains: + +- `relative_url`: The URL of the endpoint targeted by the request +- `code`: The HTTP status code returned from the individual request +- `headers`: The response headers +- `body`: The response body as a string + +### Response Status Codes + +The batch endpoint itself returns an `HTTP 200 OK` status code as long as the batch request was properly formatted and processed, regardless of whether individual requests within the batch succeeded or failed. + +To determine the success or failure of individual requests, you need to examine the status code for each request in the response array. + +In the previous example, we can see that the first two requests were successful, returning `HTTP 200 OK` and `HTTP 201 Created`, whereas the third failed returning `HTTP 404 Not found`. 
+ +## Invoking Batch Requests from Custom JavaScript Middleware + +You can make requests to the logical batch request endpoint from within [custom JavaScript middleware](/api-management/plugins/javascript) via the `TykBatchRequest` function that is included in Tyk's [JavaScript API](/api-management/plugins/javascript#javascript-api). + +This integration enables you to: + +- Create batch requests programmatically +- Process batch responses with custom logic +- Implement advanced error handling specific to your use case + +## Security Considerations + +Requests to the `/tyk/batch/` endpoint do not require any authentication, however the requests within the batch (declared in the payload) do not bypass any security mechanisms. + +As this endpoint is keyless, no rate limiting is applied to the requests to `/tyk/batch/`. + +Each request in a batch is processed through Tyk's full security pipeline, including authentication and rate limiting, so API keys or other authentication credentials must be included in each individual request within the batch. + +Rate limiting and quotas are applied to each request in the batch individually - so a batch containing three requests using the same API key will add three to their rate limit and quota counts. This could lead to one or more of the batched requests being rejected. + +This means that, whilst anyone can make a request to the batch endpoint, they can only successfully execute requests within the batch by providing valid authentication credentials in those requests. + +This means that the batch endpoint could potentially be used for reconnaissance, as attackers might determine which APIs exist based on responses. 
If this is a concern then you could consider: + +- using IP allowlists to restrict access to your API +- using [Internal Looping](/advanced-configuration/transform-traffic/looping) to put the batch request API behind a protected API +- disabling batch requests entirely if you don't need this feature + +## Performance Considerations + +- Setting `suppress_parallel_execution` to `false` provides better performance but doesn't guarantee response order. +- For large batches, consider the impact on your upstream services +- Tyk applies rate limiting to each request in the batch, which may cause some requests to be rejected if limits are exceeded + +## Best Practices when using Tyk's Batch Request feature + +We recommend that you consider the following best practice guidelines when using batch requests: + +- Validate Before Sending: Perform client-side validation before including requests in a batch to minimize predictable errors. +- Implement Timeouts: Set appropriate timeouts for batch requests to prevent long-running operations from blocking your application. +- Log Detailed Errors: Log detailed error information for failed requests to facilitate debugging. +- Group Similar Requests: Group requests with similar authentication requirements and rate limits to minimize errors. +- Implement Circuit Breakers: Use circuit breaker patterns to prevent repeated failures when upstream services are experiencing issues. + +## Troubleshooting + +There are some common issues that can be encountered when using Tyk's batch requests feature. + +### Missed trailing slash + +When an API client makes a request to the logical `tyk/batch/` endpoint, it is essential that the trailing slash is included in the request, otherwise Tyk will return an `HTTP 404` error. + +### Custom domains + +Several specific issues can arise when using batch requests with custom domains: + +**DNS Resolution**: The Tyk Gateway needs to be able to resolve the custom domain internally. 
If the Gateway can't resolve the custom domain name, batch requests will fail with connection errors, even though external requests to the same API work fine. +**Solution**: Ensure that the Tyk Gateway host can resolve the custom domain, either through proper DNS configuration or by adding entries to the host's `/etc/hosts` file. + +**Internal vs. External Routing**: When a batch request is made to a custom domain, Tyk needs to route the individual requests within the batch correctly. If the custom domain is only configured for external access but not for internal routing, the batch requests may fail. +**Solution**: Configure your custom domain to work with both external and internal routing. + +**Certificate Validation**: If your custom domain uses HTTPS, certificate validation issues can occur during the internal processing of batch requests. +**Solution**: Ensure that the certificates for your custom domain are properly configured and trusted by the Tyk Gateway. \ No newline at end of file diff --git a/api-management/certificates.mdx b/api-management/certificates.mdx new file mode 100644 index 000000000..e7d168496 --- /dev/null +++ b/api-management/certificates.mdx @@ -0,0 +1,454 @@ +--- +title: "Certificates - TLS and SSL" +description: "How to enable SSL with the Tyk Gateway and Dashboard" +keywords: "TLS, SSL, Security, Certificate, Pinning" +order: 2 +sidebarTitle: "Certificates in Tyk" +--- + +## Introduction + +Secure communication is essential in today's digital landscape. TLS/SSL protocol and Public Key Infrastructure (PKI) play a crucial role in ensuring encrypted and authenticated connections. This document provides a comprehensive walkthrough on configuring TLS/SSL, managing certificates for the Tyk Gateway and Dashboard. + +In this section, we delve into the following key topics: + +1. 
**[Enabling TLS in Tyk components](/api-management/certificates#enable-tlsssl-in-tyk-components)**: + Learn how to enable and configure TLS/SSL for Tyk Gateway and Dashboard to secure your communication. +2. **[TLS Support in Tyk](/api-management/certificates#tlsssl-configuration)**: + Understand the supported TLS versions, cipher suites, their configurations, and best practices for secure communication. +3. **[Configuring Tyk Certificate Storage](/api-management/certificates#using-tyk-certificate-storage)**: + Discover how to manage and store certificates for seamless TLS configuration in Tyk. + Explore advanced TLS settings for enhanced security. +4. **[Self Signed Certificates](/api-management/certificates#self-signed-certificates)**: + Learn how to configure and use self-signed certificates for secure communication in Tyk. +5. **[Configuring Internal Proxy Setup](/api-management/certificates#internal-proxy-setup)**: + Set up internal proxies with TLS to ensure secure communication within your architecture. + +### Certificates + +If you have had to configure an SSL server or SSH access, the following information below should be familiar to you. + +Let's start with certificate definition. Here is what [Wikipedia](https://en.wikipedia.org/wiki/Public_key_certificate) says: + +> In cryptography, a public key certificate, also known as a digital certificate or identity certificate, is an electronic document used to prove the ownership of a public key. The certificate includes information about the key, information about the identity of its owner (called the subject), and the digital signature of an entity that has verified the certificate's contents (called the issuer). If the signature is valid, and the software examining the certificate trusts the issuer, then it can use that key to communicate securely with the certificate's subject. 
+ +When it comes to authorization, it is enough for the server that has a public client certificate in its trusted certificate storage to trust it. However, if you need to send a request to the server protected by mutual TLS, or need to configure the TLS server itself, you also need to have a private key, used while generating the certificate, to sign the request. + +Using Tyk, you have two main certificate use cases: + +1. Certificates without public keys used for [client authorization and authentication](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls) +2. Certificates with private keys used for [upstream access](/api-management/upstream-authentication/mtls), and server certificates (in other words when we need to sign and encrypt the request or response). + +### PEM format + +Before a certificate can be used by Tyk, it must be encoded into **PEM format**. If you are using an `openssl` command to generate certificates, it should use PEM by default. A nice bonus of the PEM format is that it allows having multiple entries inside the same file. So in cases where a certificate also requires a private key, you can just concatenate the two files together. + +## Enable TLS/SSL in Tyk components + +TLS protocol is supported by all Tyk components. You can enable TLS in Tyk Gateway and Dashboard by modifying the `tyk.conf` and `tyk_analytics.conf` files. + +For self-signed certificates, additional considerations apply; [refer to the section below](#self-signed-certificates). +
+ +### Gateway + +You'll need to add the following to your **tyk.conf** as the minimum to enable TLS for the Gateway: + +```json +"http_server_options": { + "use_ssl": true, + "certificates": [ + { + "domain_name": "*.yoursite.com", + "cert_file": "./new.cert.cert", + "key_file": "./new.cert.key" + } + ] +} +``` + +### Dashboard + +You'll need to add the following to your **tyk_analytics.conf** as the minimum to enable TLS for the Dashboard: + +```json +"http_server_options": { + "use_ssl": true, + "certificates": [ + { + "domain_name": "*.yoursite.com", + "cert_file": "./new.cert.cert", + "key_file": "./new.cert.key" + } + ] +} +``` + +Set the [host_config.generate_secure_paths](/tyk-dashboard/configuration#host_configgenerate_secure_paths) flag to `true` so that your Dashboard URL starts with HTTPS. + +If you are using self-signed certs or are in a test environment, [you can tell Tyk to ignore validation on certs Mutual TLS support](#self-signed-certificates) + +### Testing TLS/SSL Configuration + +Restart the servers/containers and they should now be using SSL: +```{.copyWrapper} +$ docker-compose up tyk-gateway tyk-dashboard +... +tyk-gateway_1 | time="Apr 24 18:30:47" level=info msg="--> Using TLS (https)" prefix=main +tyk-gateway_1 | time="Apr 24 18:30:47" level=warning msg="Starting HTTP server on:[::]:443" prefix=main +... +``` + +And then we can curl both servers: +```{.copyWrapper} +$ curl -k https://localhost:8080/hello +{"status":"pass","version":"v3.0.0","description":"Tyk GW","details":{"dashboard":{"status":"pass","componentType":"system","time":"2020-08-28T17:19:49+02:00"},"redis":{"status":"pass","componentType":"datastore","time":"2020-08-28T17:19:49+02:00"}}} + +$ curl -k https://localhost:3000 + +``` + +### MDCB + +Mutual TLS configuration in an MDCB environment has specific requirements. An MDCB environment consists of a Control Plane and multiple Data Planes that, using MDCB, sync configuration. 
+The Control Plane and Data Plane deployments usually do not share any secrets; thus a certificate with private keys encoded with secret in the Control Plane will not be accessible to Data Plane gateways. + +To solve this issue, you need to set `security.private_certificate_encoding_secret` in the MDCB configuration file to the same value as specified in your management Gateway configuration file. By knowing the original secret, MDCB will be able to decode private keys, and +send them to the client without a password. Using a secure connection between Data Plane Gateways and MDCB is required in this case. See MDCB setup page for use_ssl usage. + + +## TLS/SSL Configuration + +TLS is configured in the `http_server_options` section of your Gateway and Dashboard configuration files. This has the following structure, common to both components: + +```{.copyWrapper} +"http_server_options": { + "use_ssl": true, + "server_name": "yoursite.com", + "min_version": 771, + "max_version": 772, + "ssl_ciphers": ["TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"], + "certificates": [ + { + "domain_name": "*.yoursite.com", + "cert_file": "./new.cert.cert", + "key_file": "./new.cert.key" + } + ] +}, +``` + +- `min_version` and `max_version` are optional and allow you to configure the [versions of TLS](#supported-tls-versions) from which Tyk will accept connections +- `ssl_ciphers` allows you to select the [cipher suite](#supported-tls-cipher-suites) that will be used to negotiate connections +- you can enter multiple certificates to be used in the encryption that will be applied for different domain names, this enables you to have multiple TLS certs for your Gateways or Dashboard domains if they are providing access to different domains via the same IP + +### Supported TLS Versions + +You need to use the following values for setting the TLS `min_version` and `max_version`.
The numbers associated with the TLS versions represent protocol version numbers used in the TLS protocol specification. These are standardized numerical values assigned by the Internet Engineering Task Force (IETF) to identify each TLS version during communication. + +| TLS Version | Value to Use | +| :----------------------- | :---------------- | +| 1.0 (see note) | 769 | +| 1.1 (see note) | 770 | +| 1.2 | 771 | +| 1.3 | 772 | + +If you do not configure minimum and maximum TLS versions, then Tyk Gateway will default to: + - minimum TLS version: 1.2 + - maximum TLS version: 1.3 + + + + + Tyk uses Golang libraries to provide TLS functionality, so the range of TLS versions supported by the Gateway is dependent upon the underlying library. Support for TLS 1.0 and 1.1 was removed in Go 1.22 (which was adopted in Tyk 5.3.6/5.6.0), so these are no longer supported by Tyk. + + + +### Supported TLS Cipher Suites + +The strength of encryption is determined by the cipher that is negotiated between client & server; each version of the TLS protocol provides a suite of available ciphers. + +TLS 1.3 protocol does not allow the setting of custom ciphers, and is designed to automatically pick the most secure cipher. + +When using earlier TLS protocols, you can deliberately choose the ciphers to be used using the `http_server_options` config option `ssl_ciphers` in `tyk.conf` and `tyk-analytics.conf`. This takes an array of strings as its value. Each string must be one of the allowed cipher suites as defined at https://golang.org/pkg/crypto/tls/#pkg-constants + +For example: + +```json +{ + "http_server_options": { + "ssl_ciphers": ["TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"], + } +} +``` + +If no ciphers match, Tyk will default to golang crypto/tls standard ciphers. 
+ +```text +"ssl_ciphers": ["TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"] + +SSL-Session: + Protocol : TLSv1.2 + Cipher : ECDHE-RSA-AES128-SHA256 + Session-ID: 8246BAFF7396BEDE71FD5AABAD493A1DD2CAF4BD70BA9A816AD2969CFD3EAA98 + Session-ID-ctx: + Master-Key: 3BB6A2623FCCAD272AE0EADFA168F13FDAC83CEAFCA232BD8A8B68CEACA373552BE5340A78672A116A908E61EEF0AD29 +``` + +```text +"ssl_ciphers": ["TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"] + +2018/06/22 18:15:00 http: TLS handshake error from 127.0.0.1:51187: tls: no cipher suite supported by both client and server + +SSL-Session: + Protocol : TLSv1.2 + Cipher : 0000 + Session-ID: + Session-ID-ctx: + Master-Key: + Start Time: 1529687700 + Timeout : 7200 (sec) + Verify return code: 0 (ok) +``` + +```text +"ssl_ciphers": ["junk or empty"] + +SSL-Session: + Protocol : TLSv1.2 + Cipher : ECDHE-RSA-AES256-GCM-SHA384 + Session-ID: A6CFF2DCCE2344A59D877872F89BDC9C9B2F15E1BBAE8C7926F32E15F957AA2B + Session-ID-ctx: + Master-Key: 88D36C895808BDF9A5481A8CFD68A0B821CF8E6A6B8C39B40DB22DA82F6E2E791C77A38FDF5DC6D21AAE3D09825E4A2A +``` + + +### Validate Hostname against Common Name + +From v2.9.3 you can force the validation of the hostname against the common name, both at the Gateway level via your `tyk.conf` and at the API level. + + + + +Set `ssl_force_common_name_check` to `true` in your `tyk.conf` file. + + + +Use `proxy.transport.ssl_force_common_name_check` in your API definition. + + + + +### Dynamically setting SSL certificates for custom domains + +If you include certificateID or certificate path to an API definition `certificates` field, Gateway will dynamically load this ceritficate for your custom domain, so you will not need to restart the process. You can do it from the Dashboard UI too, in the custom domain section. + + + + +Let say the domain certificate is stored in secret named `my-test-tls` in the same namespace as this ApiDefinition resource `httpbin`. 
You can provide the domain certificate in `certificate_secret_names` field. Tyk Operator will help you retrieve the certificate from secret and upload it to Tyk. + +```yaml{linenos=true, linenostart=1, hl_lines=["10-11"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: https + listen_port: 8443 + certificate_secret_names: + - my-test-tls + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + + + +You can also manage custom domain certificates using Kubernetes secrets in Tyk OAS. + +Example of Defining Custom Domain Certificates + +```yaml{linenos=true, linenostart=1, hl_lines=["50-51"]} +# Secret is not created in this manifest. +# Please store custom domain certificate in a kubernetes TLS secret `custom-domain-secret`. +apiVersion: v1 +data: + test_oas.json: |- + { + "info": { + "title": "Petstore with custom domain", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore with custom domain", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } + } +kind: ConfigMap +metadata: + name: cm + namespace: default +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: petstore-with-customdomain +spec: + tykOAS: + configmapRef: + name: cm + namespace: default + keyName: test_oas.json + customDomain: + enabled: true + name: "buraksekili.dev" + certificatesRef: + - custom-domain-secret +``` + +This example shows how to enable a custom domain (`buraksekili.dev`) with a TLS certificate stored in a Kubernetes secret (`custom-domain-secret`). + + + + + +## Certificate Management + +Tyk provides two options to manage certificates: plain files or certificate storage with a separate API. 
+ +All configuration options, which require specifying certificates, support both plain file paths or certificate IDs. You are able to mix them up, and Tyk will automatically distinguish file names from certificate IDs. + +The Tyk Gateway and Dashboard Admin APIs provide endpoints to create, remove, list, and see information about certificates. For the Gateway, the endpoints are: + +* Create: `POST /tyk/certs` with PEM body. Returns `{"id": "", ... }` +* Delete: `DELETE /tyk/certs/` +* Get info: `GET /tyk/certs/`. Returns meta info about the certificate, something similar to: +```json +{ + "id": "", + "fingerprint": , + "has_private_key": false, + "issuer": , + "subject": "", ... +} +``` +* Get info about multiple certificates: `GET /tyk/certs/,,`. +Returns array of meta info objects, similar to above. +* List all certificate IDs: `GET /tyk/certs`. Returns something similar to: + +```json +{ "certs": "", "", ... } +``` + +The Dashboard Admin API is very similar, except for a few minor differences: + +* Endpoints start with `/api` instead of `/tyk`, e.g. `/api/certs`, `/api/certs/`, etc. +* All certificates are managed in the context of the organization. In other words, certificates are not shared between organizations. + +Certificate storage uses a hex encoded certificate SHA256 fingerprint as its ID. When used with the Dashboard API, Tyk additionally appends the organization id to the certificate fingerprint. It means that certificate IDs are predictable, and you can check certificates by their IDs by manually +generating certificate SHA256 fingerprint using the following command: + +```{.copyWrapper} +openssl x509 -noout -fingerprint -sha256 -inform pem -in . +``` + +You may notice that you can't get the raw certificate back, only its meta information. This is to ensure security. Certificates with private keys have special treatment and are encoded before storing. 
If a private key is found it will be encrypted with the AES256 algorithm using the `security.private_certificate_encoding_secret` secret, defined in `tyk.conf` file. Otherwise, the certificate will use the [secret](/tyk-oss-gateway/configuration#secret) value in `tyk.conf`. + +### Using Tyk Certificate Storage + +In Tyk Gateway 2.4 and Tyk Dashboard 1.4 we added [Mutual TLS support](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls) including special Certificate storage, which is used to store all kinds of certificates from public to server certificates with private keys. + +In order to add new server certificates to the Gateway: + +1. Ensure that both private key and certificates are in PEM format +2. Concatenate Cert and Key files to single file +3. Go to "Certificates" section of the Tyk Dashboard, upload certificate, and you will get a unique ID response +4. Set it to the Tyk Gateway using one of the approaches below: + + * Using your `tyk.conf`: + + ``` + "http_server_options": { + "ssl_certificates": ["", ""] + } + ``` + + * Using environment variables (handy for Multi-Cloud installation and Docker in general): `TYK_GW_HTTPSERVEROPTIONS_SSLCERTIFICATES=` (if you want to set multiple certificates just separate them using a comma.) + + The Domain in this case will be extracted from standard certificate fields: `DNSNames`. + + + + + Prior to Tyk v5, the Domain could also be extracted from the now deprecated `Subject.CommonName` field. + + + +## Self Signed Certificates + +Self signed certificates can be managed in multiple ways. + +Best practice dictates that you store certificates in the standard certificate store on the local system, e.g. +`/etc/ssl/certs` + +For example, if you are using a self-signed cert on the Dashboard, in order for the Gateway to trust it, add it to the Gateway's certificate store in `/etc/ssl/certs` + +Alternatively, you can disable the verification of SSL certs in the component configurations below.
**You shouldn't do this in production!** + +### Creating a self-signed certificate pair +You can create self-signed client and server certificates with this command: +```{.copyWrapper} +openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes +``` + +For the server in `common name` specify a domain, or just pass `-subj "/CN=localhost"` to OpenSSL command. Then follow our [TLS and SSL Guide](/api-management/certificates). + +To get certificate SHA256 fingerprint use the following command: + +```{.copyWrapper} +openssl x509 -noout -fingerprint -sha256 -inform pem -in +``` + +If you are testing using cURL, your command will look like: + +```{.copyWrapper} +curl --cert client_cert.pem --key client_key.pem https://localhost:8181 +``` + +### Using self-signed certificates with Tyk Gateway + +You can set `http_server_options.ssl_insecure_skip_verify` to `true` in your tyk.conf to allow the use of self-signed certificates when connecting to the Gateway. + +### Using self-signed certificates with Tyk Dashboard + +You can set `http_server_options.ssl_insecure_skip_verify` to `true` in your tyk_analytics.conf to allow the use of self-signed certificates when connecting to the Dashboard. + + + +## Internal Proxy Setup + +From v2.9.3 you can also specify a custom proxy and set the minimum TLS versions and any SSL ciphers within your API definitions. See [Internal Proxy Setup](/api-management/gateway-config-tyk-classic#proxy-transport-settings) for more details. + + diff --git a/api-management/client-authentication.mdx b/api-management/client-authentication.mdx new file mode 100644 index 000000000..8cf44fcf3 --- /dev/null +++ b/api-management/client-authentication.mdx @@ -0,0 +1,370 @@ +--- +title: "Client Authentication and Authorization" +description: "Learn how to apply the most appropriate authentication method to secure access to your APIs with Tyk. 
Here you will find everything there is to know about authenticating and authorizing API clients with Tyk." +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Secure APIs, client" +sidebarTitle: "Overview" +--- + +import { ResponsiveGrid } from '/snippets/ResponsiveGrid.mdx'; + +## Introduction + +Tyk Gateway sits between your clients and your services, securely routing requests and responses. For each API proxy that you expose on Tyk, you can configure a range of different methods that clients must use to identify (authenticate) themselves to Tyk Gateway when making a request to access the API. + +*Authentication* and *Authorization* are the processes that you use to control access to your APIs and protect your upstream services. Each serves a distinct purpose: + +* **Authentication** (or **AuthN**) is the process of confirming the identity of the user or system making the API request. This step validates "who" is attempting to access the API, commonly using credentials such as tokens, passwords, or certificates. + +* **Authorization** (or **AuthZ**) is the process that determines if the user or system has the right permissions to perform the requested action. This step defines "what" they are allowed to do based on assigned roles, scopes, or policies. + +Whilst AuthN and AuthZ are separate actions with different standards, they are often considered together under the topic of *Securing the API*. Together, these processes allow API providers to control access, safeguard data integrity, and meet security and compliance standards, making them vital for any API management strategy. + +--- + +## How does Tyk Implement Authentication and Authorization? + +The API request processing flow within Tyk Gateway consists of a [chain of middleware](/api-management/traffic-transformation#request-middleware-chain) that perform different checks and transformations on the request (headers, parameters and payload). 
Several dedicated **authentication middleware** are provided and there is also support for user-provided **custom authentication plugins**. Multiple authentication middleware can be chained together if required by the API's access security needs. *Note that it is not possible to set the order of chained auth methods.* + +The OpenAPI description can contain a list of [securitySchemes](https://spec.openapis.org/oas/v3.0.3.html#security-scheme-object) which define the authentication methods to be used for the API; the detailed configuration of the Tyk authentication middleware is set in the [server.authentication](/api-management/gateway-config-tyk-oas#authentication) section of the Tyk Vendor Extension. + +You must enable client authentication using the `server.authentication.enabled` flag and then configure the appropriate authentication method as indicated in the relevant section of this document. When creating a Tyk OAS API from an OpenAPI description, Tyk can automatically enable authentication based upon the content of the OpenAPI description as described [here](/api-management/gateway-config-managing-oas#importing-an-openapi-description-to-create-an-api). + +When using Tyk Classic APIs, each authentication middleware has its own fields within the API definition. + +### Managing authorization data + +The data that the client provides with the API request used to authenticate with Tyk and confirm that it is authorized to access the API is often of no use to the upstream service and, depending on your security governance, may even be prohibited from being made available to the upstream. + +Tyk offers a simple option, separately configurable for each API to remove, or "strip", the authentication/authorization data from the incoming request before proxying to the upstream. 
+ +This is controlled using the [server.authentication.stripAuthorizationData](/api-management/gateway-config-tyk-oas#authentication) field in the Tyk Vendor Extension (Tyk Classic: `strip_auth_data`). + +## What does Tyk Support? + +Tyk includes support for various industry-standard methods to secure your APIs. This page provides an overview of the options available, helping you to choose and implement what works best for you. + +Use Ctrl+F or the sidebar to find specific topics, for example β€œJWT” for JSON Web Tokens or β€œmTLS” for mutual TLS. + +You can also use the links below to jump directly to the appropriate sections to learn how to secure your APIs using Tyk. + + + + +Delegate authentication using one of the most widely used open standard protocols + + + +Securely transmit information between parties. + + + +Secure APIs with username and password credentials. + + + +Implement token-based authentication for API access. + + + +Establish secure channels with two-way certificate verification. + + + +Verify message integrity using shared secret keys. + + +{/* To be added + +Verify message integrity using shared secret certificates. + */} + + +Create custom plugins to implement specific authentication requirements. + + + +Allow unrestricted access for public APIs. + + + + + +--- + +## Other Authentication Methods + +### Integrate with External Authorization Server (deprecated) + + + +Tyk has previously offered two types of OAuth authentication flow; [Tyk as the authorization server]() and Tyk connecting to an external *auth server* via a dedicated *External OAuth* option. The dedicated external *auth server* option was deprecated in Tyk 5.7.0. +
+ +For third-party OAuth integration we recommend using the JSON Web Token (JWT) middleware which is described [here](/basic-config-and-security/security/authentication-authorization/json-web-tokens), which offers the same functionality with a more streamlined setup and reduced risk of misconfiguration. +
+ +The remainder of this section is left for reference and is not maintained. +
+ + +To call an API that is protected by OAuth, you need to have an access token from the third party IDP (it could be an opaque token or a JWT). + +For subsequent calls the access token is provided alongside the API call and needs to be validated. With JWT, Tyk can confirm the validity of the JWT with the secret provided in your config. The secret signs the JWT when created and confirms that none of its contents has changed. + +For this reason, information like the expiry date which are often set within the JWT cannot be changed after the JWT has been initially created and signed. This means you are not able to revoke a token before the expiry set in the JWT with the standard JWT flow. With OAuth you can use [OAuth introspection](https://www.rfc-editor.org/rfc/rfc7662) to overcome this. With introspection, you can validate the access token via an introspection endpoint that validates the token. + +Let’s see how external OAuth middleware is configured. + +#### OAS contract + +```yaml +externalOAuthServer: + enabled: true, + providers: # only one item in the array for now (we're going to support just one IDP config in the first iteration) + - jwt: #validate JWTs generated by 3rd party Oauth servers (like Okta) + enabled: true + signingMethod: HMAC/RSA/ECDSA # to verify signing method used in jwt + source: key # secret to verify signature + issuedAtValidationSkew: 0 + notBeforeValidationSkew: 0 + expiresAtValidationSkew: 0 + identityBaseField: # identity claimName + introspection: # array for introspection details + enabled: true/false + clientID: # for introspection request + clientSecret: # for introspection request, if empty will use oAuth.secret + url: # token introspection endpoint + cache: # Tyk will cache the introspection response when `cache.enabled` is set to `true` + enabled: true/false, + timeout: 0 # The duration (in seconds) for which Tyk will retain the introspection outcome in its cache. 
If the value is "0", it indicates that the introspection outcome will be stored in the cache until the token's expiration. + identityBaseField: # identity claimName +``` + +#### Tyk Classic API definition contract + +```yaml +"external_oauth": { + "enabled": true, + "providers": [ + { + "jwt": { + "enabled": false, + "signing_method": rsa/ecdsa/hmac, + "source": # jwk url/ base64 encoded static secret / base64 encoded jwk url + "identity_base_field": # identity claim name + "expires_at_validation_skew": # validation skew config for exp + "not_before_validation_skew": # validation skew config for nbf + "issued_at_validation_skew" : # validation skew config for iat + }, + "introspection": { + "enabled": true, + "url": # introspection endpoint url + "client_id": # client Id used for introspection + "client_secret": # client secret to be filled here (plain text for now, TODO: decide on a more secure mechanism) + "identity_base_field": # identity claim name + "cache": { + "enabled": true, + "timeout": # timeout in seconds + } + } + } + ] +} +``` +- `externalOAuthServer` set `enabled` to `true` to enable the middleware. +- `providers` is an array of multiple IDP configurations, with each IDP config being an element in the `providers` array. +- You can use this config to use JWT self validation using `jwt` or use introspection via `instropection` in the `providers` section . + + + + + For now, you’ll be limiting `providers` to have only one element, ie one IDP configured. + + + +#### JWT + +There could be cases when you don’t need to introspect a JWT access token from a third party IDP, and instead you can just validate the JWT. This is similar to existing JWT middleware, adding it in External OAuth middleware for semantic reasons. + +- `enabled` - enables JWT validation. +- `signingMethod` - specifies the signing method used to sign the JWT. 
+- `source` - the secret source, it can be one of: + - a base64 encoded static secret + - a valid JWK url in plain text + - a valid JWK url in base64 encoded format +- `issuedAtValidationSkew` , `notBeforeValidationSkew`, `expiresAtValidationSkew` can be used to [configure clock skew](/api-management/authentication/jwt-claim-validation#clock-skew-configuration) for json web token validation. +- `identityBaseField` - the identity key name for claims. If empty it will default to `sub`. + +##### Example: Tyk OAS API definition with JWT validation enabled + +```json +"securitySchemes": { + "external_jwt": { + "enabled": true, + "header": { + "enabled": true, + "name": "Authorization" + }, + "providers": [ + { + "jwt": { + "enabled": true, + "signingMethod": "hmac", + "source": "dHlrLTEyMw==", + "identityBaseField": "sub" + } + } + ] + } +} +``` + +##### Example: Tyk Classic API definition with JWT validation enabled + +```json +"external_oauth": { + "enabled": true, + "providers": [ + { + "jwt": { + "enabled": true, + "signing_method": "hmac", + "source": "dHlrLTEyMw==", + "issued_at_validation_skew": 0, + "not_before_validation_skew": 0, + "expires_at_validation_skew": 0, + "identity_base_field": "sub" + }, + "introspection": { + "enabled": false, + "url": "", + "client_id": "", + "client_secret": "", + "identity_base_field": "", + "cache": { + "enabled": false, + "timeout": 0 + } + } + } + ] +} +``` +#### Introspection + +For cases where you need to introspect the OAuth access token, Tyk uses the information in the `provider.introspection` section of the contract. This makes a network call to the configured introspection endpoint with the provided `clientID` and `clientSecret` to introspect the access token. 
+ +- `enabled` - enables OAuth introspection +- `clientID` - clientID used for OAuth introspection, available from IDP +- `clientSecret` - secret used to authenticate introspection call, available from IDP +- `url` - endpoint URL to make the introspection call +- `identityBaseField` - the identity key name for claims. If empty it will default to `sub`. + +##### Caching + +Introspection via a third party IdP is a network call. Sometimes it may be inefficient to call the introspection endpoint every time an API is called. Caching is the solution for this situation. Tyk caches the introspection response when `enabled` is set to `true` inside the `cache` configuration of `introspection`. Then it retrieves the value from the cache until the `timeout` value finishes. However, there is a trade-off here. When the timeout is long, it may result in accessing the upstream with a revoked access token. When it is short, the cache is not used as much resulting in more network calls. + +The recommended way to handle this balance is to never set the `timeout` value beyond the expiration time of the token, which would have been returned in the `exp` parameter of the introspection response. + +See the example introspection cache configuration: + +```yaml +"introspection": { + ... 
+ "cache": { + "enabled": true, + "timeout": 60 // in seconds + } +} +``` +##### Example: Tyk OAS API definition external OAuth introspection enabled + +```json +"securitySchemes": { + "keycloak_oauth": { + "enabled": true, + "header": { + "enabled": true, + "name": "Authorization" + }, + "providers": [ + { + "introspection": { + "enabled": true, + "url": "http://localhost:8080/realms/tyk/protocol/openid-connect/token/introspect", + "clientId": "introspection-client", + "clientSecret": "DKyFN0WXu7IXWzR05QZOnnSnK8uAAZ3U", + "identityBaseField": "sub", + "cache": { + "enabled": true, + "timeout": 3 + } + } + } + ] + } +} +``` +##### Example: Tyk Classic API definition with external OAuth introspection enabled + +```json +"external_oauth": { + "enabled": true, + "providers": [ + { + "jwt": { + "enabled": false, + "signing_method": "", + "source": "", + "issued_at_validation_skew": 0, + "not_before_validation_skew": 0, + "expires_at_validation_skew": 0, + "identity_base_field": "" + }, + "introspection": { + "enabled": true, + "url": "http://localhost:8080/realms/tyk/protocol/openid-connect/token/introspect", + "client_id": "introspection-client", + "client_secret": "DKyFN0WXu7IXWzR05QZOnnSnK8uAAZ3U", + "identity_base_field": "sub", + "cache": { + "enabled": true, + "timeout": 3 + } + } + } + ] +} +``` + +### Integrate with OpenID Connect (deprecated) + + + +Tyk has previously offered a dedicated OpenID Connect option for client authentication, but this was not straightforward to use and was deprecated in Tyk 5.7.0. +
+ +For integration with a third-party OIDC provider we recommend using the JSON Web Token (JWT) middleware which is described [above](/basic-config-and-security/security/authentication-authorization/json-web-tokens), which offers the same functionality with a more streamlined setup and reduced risk of misconfiguration. +
+ +The remainder of this section is left for reference and is not maintained. +
+ + + +[OpenID Connect](https://openid.net/developers/how-connect-works) (OIDC) builds on top of OAuth 2.0, adding authentication. You can secure your APIs on Tyk by integrating with any standards compliant OIDC provider using [JSON Web Tokens](/basic-config-and-security/security/authentication-authorization/json-web-tokens) (JWTs). +JWTs offer a simple way to use the third-party Identity Provider (IdP) without needing any direct integration between the Tyk and 3rd-party systems. + +To integrate a 3rd party OAuth2/OIDC IdP with Tyk, all you will need to do is ensure that your IdP can issue OAuth2 JWT access tokens as opposed to opaque tokens. + +The client application authenticates with the IdP which then provides an access token that is accepted by Tyk. Tyk will take care of the rest, ensuring that the rate limits and quotas of the underlying identity of the bearer are maintained across JWT token re-issues, so long as the "sub" (or whichever identity claim you chose to use) is available and consistent throughout and the policy that underpins the security clearance of the token exists too. + + + +## Conclusion + +Securing your APIs is a foundational step toward managing data integrity and access control effectively. Now that you've configured authentication and authorization, the next steps in your API journey with Tyk should involve: + +Defining Access Policies: Use Tyk’s policies to refine API access controls, rate limits, and quotas. This lets you align your security model with business needs and enhance user experience through granular permissions. You can learn more about policies [here](/api-management/policies). + +Exploring API Analytics: Leverage Tyk’s analytics to monitor access patterns, track usage, and gain insights into potential security risks or high-demand endpoints. Understanding usage data can help in optimizing API performance and enhancing security measures. 
You can learn more about analytics [here](/api-management/dashboard-configuration#analyzing-api-traffic-activity). \ No newline at end of file diff --git a/api-management/cloud/audit-logs.mdx b/api-management/cloud/audit-logs.mdx new file mode 100644 index 000000000..958a58a68 --- /dev/null +++ b/api-management/cloud/audit-logs.mdx @@ -0,0 +1,54 @@ +--- +title: "Configure Audit Logs in Tyk Cloud" +description: "Learn how to set up and manage audit logs in Tyk Cloud Control Plane deployments." +keywords: "Audit Logs, Tyk Cloud, Control Plane, Data Plane" +sidebarTitle: "Audit Logs" +--- + +## Introduction + +Tyk Cloud provides comprehensive audit logging capabilities to track and monitor all administrative actions performed within your Tyk Dashboard. This feature is essential for compliance and security. + +## What are Audit Logs? + +Audit logs capture detailed records of all requests made to endpoints under the `/api` route in your Tyk Dashboard. These logs include information about: + +- User actions and administrative operations +- API changes and configurations +- Authentication and authorisation events +- System access and modifications +- Response status codes and timestamps + +## Enabling Audit Logs for Control Plane Deployments + + + +The audit log feature is available for Control Plane versions v5.7.0 or later. + + + +### How to Enable Audit Logging + +1. **Contact Your Account Manager**: Audit logging must be enabled at the subscription level. Reach out to your Tyk account manager to add this feature to your plan. + +2. **Enable via Tyk Cloud UI**: Once the feature is available in your subscription, you can enable audit logging directly from the Tyk Cloud console: + - Navigate to your Control Plane deployment + - Select **Edit** from the deployment options + - Enable the **Audit Logging** option + - Save and redeploy your Control Plane + +Audit logs will be stored in your Control Plane's database for easy access and management. 
+ +### Viewing and Accessing Audit Logs + +Once audit logging is enabled, you can retrieve the logs via the Tyk Dashboard API. + +For details on the API endpoints and usage, see [Retrieving Audit Logs via API](/api-management/dashboard-configuration#retrieving-audit-logs-via-api). + +## Storage Size Caps + +Tyk Cloud enforces audit log storage size caps based on your subscription terms: + +- **Storage Limits**: A size cap is applied to audit logs based on your subscription plan +- **Automatic Cleanup**: When the storage limit is reached, the oldest logs are automatically removed to make space for new entries. + diff --git a/api-management/custom-auth-with-proxy-identity-provider.mdx b/api-management/custom-auth-with-proxy-identity-provider.mdx new file mode 100644 index 000000000..f5d54efcb --- /dev/null +++ b/api-management/custom-auth-with-proxy-identity-provider.mdx @@ -0,0 +1,48 @@ +--- +title: "Custom Authentication with Proxy Identity Provider" +description: "Learn how to integrate external services with Tyk API Gateway. Discover how to use middleware plugins, webhooks, and service discovery to extend your API functionality and connect with third-party systems." +keywords: "Tyk Identity Broker, TIB, Identity Provider, Identity Handler, SSO, Custom Authentication, Custom Proxy Provider" +sidebarTitle: "Custom Authentication" +--- + +The proxy identity provider is a generic solution to more legacy problems, as well as a way to handle flows such as basic auth access with third party providers or OAuth password grants where the request can just be passed through to the providing endpoint to return a direct response. + +The proxy provider will take a request, proxy it to an upstream host, capture the response, and analyze it for triggers of "success"; if the triggers come out as true, then the provider will treat the request as authenticated and hand over to the Identity Handler to perform whatever action is required with the user data. 
+ +Success can be triggered using three methods: + +1. Response code: e.g. if this is an API request, a simple `200` response would suffice to act as a successful authentication. +2. Response body exact match: You can have a base64 encoded body that you would expect as a successful match, if the two bodies are the same, then the request will be deemed successful. +3. Regex: Most likely, the response might be dynamic (and return a response code, timestamp or other often changing parameter), in which case you may want to just match the response to a regex. + +These can be used in conjunction as gates, e.g. a response must be `200 OK` and match the regex in order to be marked as successful. + +## JSON Data and User names + +The Proxy provider can do some clever things, such as extract JSON data from the response and decode it, as well as pull username data from the Basic Auth header (for example, if your identity provider supports dynamic basic auth). + +## Log into the Dashboard with the Proxy Provider + +The configuration below will proxy a request to `http://{TARGET-HOSTNAME}:{PORT}/` and evaluate the response status code, if the status code returned is `200` then TIB will assume the response is JSON (`"ResponseIsJson": true`) to extract an access token (e.g. 
if this is an OAuth pass-through request) and try and find an identity to bind the Dashboard user to in the `user_name` JSON field of the response object (`"UsernameField": "user_name"`): + +```{.copyWrapper} +{ + "ActionType": "GenerateOrLoginUserProfile", + "ID": "7", + "OrgID": "{YOUR-ORG-ID}", + "ProviderConfig": { + "AccessTokenField": "access_token", + "ExtractUserNameFromBasicAuthHeader": false, + "OKCode": 200, + "OKRegex": "", + "OKResponse": "", + "ResponseIsJson": true, + "TargetHost": "http://{TARGET-HOSTNAME}:{PORT}/", + "UsernameField": "user_name" + }, + "ProviderName": "ProxyProvider", + "ReturnURL": "http://{DASH-DOMAIN}:{DASH-PORT}/tap", + "Type": "redirect" +} +``` + diff --git a/api-management/dashboard-configuration.mdx b/api-management/dashboard-configuration.mdx new file mode 100644 index 000000000..8adf3e85e --- /dev/null +++ b/api-management/dashboard-configuration.mdx @@ -0,0 +1,5367 @@ +--- +title: "Manage the Tyk Dashboard" +description: "How to manage users, teams, permissions, RBAC in Tyk Dashboard" +keywords: "Manage Tyk Dashboard, User Management, RBAC, Role Based Access Control, User Groups, Teams, Permissions, API Ownership, SSO, Single Sign On, Multi Tenancy" +sidebarTitle: "Manage Tyk Dashboard" +--- + +import OpaRules from '/snippets/opa-rules.mdx'; +import MongodbVersionsInclude from '/snippets/mongodb-versions-include.mdx'; +import SqlVersionsInclude from '/snippets/sql-versions-include.mdx'; +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; + +## Introduction + +Tyk Dashboard diagram + +The Tyk Dashboard is a powerful web-based interface that serves as the **central management hub for your API ecosystem**. It provides a user-friendly Graphical User Interface (GUI) for configuring, monitoring, and analyzing your APIs managed by Tyk. + +The Dashboard also exposes a **REST API**, allowing for programmatic control and integration with other tools and workflows. 
+ +This page introduces general features of the Dashboard and how to configure them. If you are looking for global configurations of the Dashboard deployment, refer to this [config file](/tyk-dashboard/configuration). + +We will delve into the following key topics: + +1. **[Exploring the Dashboard UI](#exploring-the-dashboard-ui)**: A tour of the Dashboard UI. + +2. **[Exploring the Dashboard API](#exploring-the-dashboard-api)**: Explore the Dashboard APIs, including their classification, authentication methods, and usage examples with Swagger and Postman collections. + +3. **[API Management using API Endpoint Designer](#exploring-api-endpoint-designer)**: A graphical environment for configuring your Tyk APIs. + +4. **[Monitoring and Traffic Analytics](#traffic-analytics)**: Exploration of Tyk's traffic analytics capabilities, including logging mechanisms, error tracking, endpoint analysis, and various activity type measurements. + +5. **[API Governance using API Templates and API Categories](#governance-using-api-categories)** + +6. **[System Management](#system-administration)**: Detailed overview of Tyk's system management capabilities, including Admin API functionalities, organization management and configuring audit logs. + +7. **[Supported Database](#supported-database)**: We will examine Dashboard's storage requirement, compatible database versions and how to configure them. + +8. **[Exploring Data Storage Solution](#data-storage-solutions)**: We will explore Dashboard's multi-layered storage architecture and understand how to configure each storage tier effectively. + +## Exploring the Dashboard UI + +To get a tour of the Dashboard UI, refer to this [document](/getting-started/using-tyk-dashboard). + +## Exploring the Dashboard API + +The Dashboard is a large, granular REST API with a thin-client web front-end, and if being deployed as part of a Tyk install, serves as the main integration point instead of the Gateway API. 
+ +API Overview + +**The Dashboard API is a superset of the Gateway API**, providing the same functionality, with additional features (anything that can be done in the Dashboard has an API endpoint), and offers some additional advantages: + - The Dashboard API has a granular structure, you can create separate clients easily. + - The API features read/write permissions on a per-endpoint level to have extra control over integrations. + - The API enforces a schema that can be modified and hardened depending on your usage requirements. + +### Types of Dashboard API + +The Dashboard exposes two APIs: + - **Dashboard API**: Is used for operational management of Tyk resources (APIs, policies, keys, etc.). This API offers granular permissions based on user roles. + + To know more about Dashboard APIs, refer the following documents: + - [Postman / Swagger / Open API specification](/tyk-dashboard-api) + - [Dashboard API Usage Examples](#dashboard-api-resources-and-usage) + + - **Dashboard Admin API**: Is used for system-level administration and initial setup tasks like managing organizations, initial user creation, backups/migrations and SSO setup. + + To know more about Dashboard Admin APIs, refer the following documents: + - [Postman / Swagger / Open API specification](/dashboard-admin-api) + - [Dashboard Admin API Usage Examples](#dashboard-admin-api-resources-and-usage) + +### Authenticating with Dashboard APIs + +**Dashboard API** + +The [Tyk Dashboard API](/tyk-dashboard-api) is secured using an `Authorization` header that must be added to each request that is made. The **Tyk Dashboard API Access Credentials** `Authorization` key can be found within the Dashboard UI at the bottom of the **Edit User** section for a user. + +**Dashboard Admin API** + +The Tyk Dashboard Admin API is secured using a shared secret that is set in the `tyk_analytics.conf` file. 
Calls to the Admin API require the `admin-auth` header to be provided, to differentiate the call from a regular Dashboard API call. + +## Dashboard API Resources and Usage + +### Overview + +The [Tyk Dashboard API](/tyk-dashboard-api) is a superset of the Tyk Gateway API, enabling (almost) all of the core features and adding many more. The Dashboard API is also more granular and supports [Role Based Access Control](/api-management/user-management#) (RBAC) on both a multi-tenant, and user basis. + +Using the Dashboard API it is possible to set Read / Write / ReadWrite / Deny access to sections of the API on a user by user basis, and also segregate User / Key / API Ownership by organization. + +The availability of RBAC varies depending on the license or subscription. For further information, please check our [price comparison](https://tyk.io/price-comparison/) or consult our sales and expert engineers + + + +For optimal results, it is advisable to exclusively employ the Tyk Dashboard API (avoiding direct calls to the Tyk Gateway API) within a Self-Managed setup, enabling the Dashboard to manage the Tyk API gateways cluster. + + + + +Tyk Dashboard API security + +### Pagination + +Selected Dashboard APIs can be paginated. + +You can select the number of result pages to return by adding a parameter `p` which starts at `1`. At the default page size, this returns items 1-10. Setting `p` to `2` returns items 11-20 and so on. Alternatively, passing `0` or lower as a parameter will return all items. + +The default page size is 10. You can overwrite the default page size in your `tyk_analytics.conf` using the `page_size` key. It's suggested you do not modify it as it will affect the performance of the Dashboard. + +**Sample Request:** + +```http +GET /api/apis/?p=1 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response:** + +```yaml +{ + "apis": [ + { ... }, + { ... }, + { ... 
} + ], + "pages": 1 +} +``` + +### Manage APIs - API Definition + + + + +See [API Definition Objects](/api-management/gateway-config-tyk-classic) section for an explanation of each field in the request & response. + + + +#### Get List of APIs + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/apis/` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/apis?p=0 HTTP/1.1 +Host: localhost:3000 +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +```yaml +{ + "apis": [ + { + "api_model": {}, + "api_definition": { + "id": "54b53e47eba6db5c70000002", + "name": "Nitrous Test", + "api_id": "39d2c98be05c424371c600bd8b3e2242", + "org_id": "54b53d3aeba6db5c35000002", + "use_keyless": false, + "use_oauth2": false, + "oauth_meta": { + "allowed_access_types": [], + "allowed_authorize_types": [ + "token" + ], + "auth_login_redirect": "" + }, + "auth": { + "auth_header_name": "authorization" + }, + "use_basic_auth": false, + "notifications": { + "shared_secret": "", + "oauth_on_keychange_url": "" + }, + "enable_signature_checking": false, + "definition": { + "location": "header", + "key": "" + }, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "expires": "", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "use_extended_paths": false, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [] + } + } + } + }, + "proxy": { + "listen_path": "/39d2c98be05c424371c600bd8b3e2242/", + "target_url": "http://tyk.io", + "strip_listen_path": true + }, + "custom_middleware": { + "pre": null, + "post": null + }, + "session_lifetime": 0, + "active": true, + "auth_provider": { + "name": "", + "storage_engine": "", + "meta": null + }, + "session_provider": { + "name": "", + "storage_engine": "", + "meta": null + }, + "event_handlers": { + "events": {} + }, + 
"enable_batch_request_support": false, + "enable_ip_whitelisting": false, + "allowed_ips": [], + "expire_analytics_after": 0 + }, + "hook_references": [] + } + ... + ], + "pages": 0 +} +``` + +#### Search APIs by name + +| **Property** | **Description** | +| :------------ | :------------------ | +| Resource URL | `/api/apis/search` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/apis?q=Some+Name HTTP/1.1 +Host: localhost:3000 +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +Similar to API list response + +#### Retrieve a single API by ID + +| **Property** | **Description** | +| :------------ | :---------------- | +| Resource URL | `/api/apis/{id}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + + + +`{id}` can either be the internal or public ID ( see `api_id` in the sample response ) + + + +**Sample request** + +```http +GET /api/apis/54c24242eba6db1c9a000002 HTTP/1.1 +Host: localhost +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +```json +{ + "api_model": {}, + "api_definition": { + "id": "54c24242eba6db1c9a000002", + "name": "Test", + "api_id": "bc2f8cfb7ab241504d9f3574fe407499", + "org_id": "54b53d3aeba6db5c35000002", + "use_keyless": false, + "use_oauth2": false, + "oauth_meta": { + "allowed_access_types": [], + "allowed_authorize_types": [ + "token" + ], + "auth_login_redirect": "" + }, + "auth": { + "auth_header_name": "authorization" + }, + "use_basic_auth": false, + "notifications": { + "shared_secret": "", + "oauth_on_keychange_url": "" + }, + "enable_signature_checking": false, + "definition": { + "location": "header", + "key": "" + }, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "expires": "", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "use_extended_paths": true, + "extended_paths": { + "ignored": [ + { + 
"path": "/test-path/", + "method_actions": { + "GET": { + "action": "no_action", + "code": 200, + "data": "", + "headers": {} + } + } + }, + { + "path": "/test-path/reply", + "method_actions": { + "GET": { + "action": "reply", + "code": 200, + "data": "{\"foo\":\"bar\"}", + "headers": { + "x-test": "test" + } + } + } + } + ], + "white_list": [], + "black_list": [] + } + } + } + }, + "proxy": { + "listen_path": "/bc2f8cfb7ab241504d9f3574fe407499/", + "target_url": "http://httpbin.org/", + "strip_listen_path": true + }, + "custom_middleware": { + "pre": [], + "post": [] + }, + "session_lifetime": 0, + "active": true, + "auth_provider": { + "name": "", + "storage_engine": "", + "meta": null + }, + "session_provider": { + "name": "", + "storage_engine": "", + "meta": null + }, + "event_handlers": { + "events": { + "QuotaExceeded": [ + { + "handler_name": "eh_web_hook_handler", + "handler_meta": { + "_id": "54be6c0beba6db07a6000002", + "event_timeout": 60, + "header_map": { + "x-tyk-test": "123456" + }, + "method": "POST", + "name": "Test Post", + "org_id": "54b53d3aeba6db5c35000002", + "target_path": "http://httpbin.org/post", + "template_path": "" + } + } + ] + } + }, + "enable_batch_request_support": true, + "enable_ip_whitelisting": true, + "allowed_ips": [ + "127.0.0.1" + ], + "expire_analytics_after": 0 + }, + "hook_references": [ + { + "event_name": "QuotaExceeded", + "event_timeout": 60, + "hook": { + "api_model": {}, + "id": "54be6c0beba6db07a6000002", + "org_id": "54b53d3aeba6db5c35000002", + "name": "Test Post", + "method": "POST", + "target_path": "http://httpbin.org/post", + "template_path": "", + "header_map": { + "x-tyk-test": "123456" + }, + "event_timeout": 0 + } + } + ] +} +``` + + +#### Delete API by ID + +**Sample Request** + +```http +DELETE /api/apis/54c24242eba6db1c9a000002 HTTP/1.1 +Host: localhost +Authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +```json +{ + "Status":"OK", + "Message":"API deleted", + 
"Meta":null +} +``` + +#### Create API Definition + +Creating API definitions is slightly different to the core API, API definitions are wrapped inside an `api_definition` field and event handlers, such as webhooks are not embedded in the main `api_definition` object (though they can be), webhooks are instead appended as references into the `hook_references` field, the API will embed the correct webhook data into the event handler interface. + +Please note that IDs (both `id` and `api_id`) are auto-generated by Tyk and cannot be set by the user. In Self-Managed installations `api_id` can be overwritten with a call to the Update API Definition endpoint, but this is currently not possible when the Dashboard resides in Tyk Cloud. + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/apis/` | +| Method | POST | +| Type | None | +| Body | Advanced API Definition | +| Param | None | + +**Sample Request** + +```http +POST /api/apis HTTP/1.1 +Host: localhost:3000 +Connection: keep-alive +Content-Type: application/json +Content-Length: 1356 +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "api_definition": { + "name": "Test", + "auth": { + "auth_header_name": "authorization" + }, + "definition": { + "location": "header", + "key": "" + }, + "proxy": { + "target_url": "http://httpbin.org/" + }, + "version_data": { + "use_extended_paths": true, + "not_versioned": true, + "versions": { + "Default": { + "expires": "", + "name": "Default", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "extended_paths": { + "ignored": [ + { + "path": "/test-path/", + "method_actions": { + "GET": { + "action": "no_action", + "code": 200, + "data": "", + "headers": {} + } + } + }, + { + "path": "/test-path/reply", + "method_actions": { + "GET": { + "action": "reply", + "code": 200, + "data": "{\"foo\":\"bar\"}", + "headers": { + "x-test": "test" + } + } + } + } + ], + "white_list": [], + "black_list": [] + 
}, + "use_extended_paths": true + } + } + }, + "use_oauth2": false, + "oauth_meta": { + "auth_login_redirect": "", + "allowed_access_types": [], + "allowed_authorize_types": [ + "token" + ] + }, + "notifications": { + "shared_secret": "", + "oauth_on_keychange_url": "" + }, + "enable_ip_whitelisting": true, + "allowed_ips": [ + "127.0.0.1" + ], + "use_keyless": false, + "enable_signature_checking": false, + "use_basic_auth": false, + "active": true, + "enable_batch_request_support": true + }, + "hook_references": [ + { + "event_name": "QuotaExceeded", + "hook": { + "api_model": {}, + "id": "54be6c0beba6db07a6000002", + "org_id": "54b53d3aeba6db5c35000002", + "name": "Test Post", + "method": "POST", + "target_path": "http://httpbin.org/post", + "template_path": "", + "header_map": { + "x-tyk-test": "123456" + }, + "event_timeout": 0 + }, + "event_timeout": 60 + } + ] +} +``` + +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "API created", + "Meta": "54c24242eba6db1c9a000002" +} +``` + +Please note that Tyk matches the Ignored paths in the order in which they are specified in the `ignored` array. Subpaths of a route are matched automatically and so should be placed above parent routes if they need to be matched individually. + +#### Update API Definition + +APIs that are created using the advanced Dashboard API are referenced by their internal ID instead of their API-ID. + +Please note that whilst `api_id` can be updated for Self-Managed installations, this is currently not possible when the Dashboard resides in Tyk Cloud. Updates to `api_id` in Tyk Cloud will be ignored. 
+ +| **Property** | **Description** | +| :------------ | :------------------------------------- | +| Resource URL | `/api/apis/{internal_or_external_id}` | +| Method | PUT | +| Type | None | +| Body | Advanced API Definition | +| Param | None | + +**Sample Request** + +```http +PUT /api/apis/54c24242eba6db1c9a000002 HTTP/1.1 +Host: localhost:3000 +Connection: keep-alive +Content-Type: application/json +Content-Length: 1356 +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "api_definition": { + "id": "54c24242eba6db1c9a000002", + "api_id": "bc2f8cfb7ab241504d9f3574fe407499", + "name": "Test", + "auth": { + "auth_header_name": "authorization" + }, + "definition": { + "location": "header", + "key": "" + }, + "proxy": { + "target_url": "http://httpbin.org/" + }, + "version_data": { + "use_extended_paths": true, + "not_versioned": true, + "versions": { + "Default": { + "expires": "", + "name": "Default", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "extended_paths": { + "ignored": [ + { + "path": "/test-path/", + "method_actions": { + "GET": { + "action": "no_action", + "code": 200, + "data": "", + "headers": {} + } + } + }, + { + "path": "/test-path/reply", + "method_actions": { + "GET": { + "action": "reply", + "code": 200, + "data": "{\"foo\":\"bar\"}", + "headers": { + "x-test": "test" + } + } + } + } + ], + "white_list": [], + "black_list": [] + }, + "use_extended_paths": true + } + } + }, + "use_oauth2": false, + "oauth_meta": { + "auth_login_redirect": "", + "allowed_access_types": [], + "allowed_authorize_types": [ + "token" + ] + }, + "notifications": { + "shared_secret": "", + "oauth_on_keychange_url": "" + }, + "enable_ip_whitelisting": true, + "allowed_ips": [ + "127.0.0.1" + ], + "use_keyless": false, + "enable_signature_checking": false, + "use_basic_auth": false, + "active": true, + "enable_batch_request_support": true + }, + "hook_references": [ + { + "event_name": "QuotaExceeded", + "hook": { + "api_model": {}, + 
"id": "54be6c0beba6db07a6000002", + "org_id": "54b53d3aeba6db5c35000002", + "name": "Test Post", + "method": "POST", + "target_path": "http://httpbin.org/post", + "template_path": "", + "header_map": { + "x-tyk-test": "123456" + }, + "event_timeout": 0 + }, + "event_timeout": 60 + } + ] +} +``` + +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "Api updated", + "Meta": "" +} +``` + +### Data Graphs API + +Currently `/api/data-graphs/` has only one endpoint called `/data-sources` with only a `POST` HTTP method. + +The Dashboard exposes the `/api/data-graphs/data-sources/import` endpoint which allows you to import an [AsyncAPI](https://www.asyncapi.com/docs/reference/specification/v3.0.0) or [OpenAPI](https://swagger.io/specification/) document. + +#### Supported AsyncAPI versions +* 2.0.0 +* 2.1.0 +* 2.2.0 +* 2.3.0 +* 2.4.0 + +#### Supported OpenAPI versions +* 3.0.0 + +#### Import a document from a remote resource + +| **Property** | **Description** | +| :-------------- | :-------------------------------------------- | +| Resource URL | `/api/data-graphs/data-sources/import` | +| Method | `POST` | +| Content-Type | `application/json` | +| Body | `{`
` "url": "resource URL" `
`}` | + +The fetched document can be an OpenAPI or AsyncAPI document. The format will be detected automatically. The data source import API only checks the fetched data and tries to determine the document format, the status codes are ignored. +It returns an error if it fails to determine the format and the document type. HTTP 500 is returned if a programming or network error occurs. If the fetched request body is malformed then HTTP 400 is returned. + +#### Import an OpenAPI document + +The data source import API supports importing OpenAPI documents. The document can be used as a request body. + +| **Property** | **Description** | +| :-------------- | :------------------------------------------- | +| Resource URL | `/api/data-graphs/data-sources/import` | +| Method | `POST` | +| Content-Type | `application/vnd.tyk.udg.v2.openapi` | +| Body | `` | + + +The document can be in JSON or YAML format. The import API can determine the type and parse it. + +#### Import an AsyncAPI document + +The data source import API supports importing AsyncAPI documents. The document can be used as a request body. + +| **Property** | **Description** | +| :-------------- | :---------------------------------------- | +| Resource URL | `/api/data-graphs/data-sources/import` | +| Method | `POST` | +| Content-Type | `application/vnd.tyk.udg.v2.asyncapi` | +| Body | `` | + +The document can be in JSON or YAML format. The import API can determine the type and parse it. 
+ +#### Response Structure + +The response structure is consistent with other endpoints, as shown in the table below: + +| **Property** | **Description** | +| :-------------- | :------------------------------------------------------- | +| Status | `Error` or `OK` | +| Message | Verbal explanation | +| Meta | API ID for success and `null` with error (not in use) | + +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "Data source imported", + "Meta": "64102568f2c734bd2c0b8f99" +} +``` + +### Analytics API + + + +Below APIs returns data only if you have Pump 1.7.0 + + + +#### Analytics of API Key +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/activity/keys/endpoint/{keyHash}/{startDay}/{startMonth}/{startYear}/{EndDay}/{EndMonth}/{EndYear}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +It returns analytics of the endpoints of all APIs called using KEY between start and end date. + +**Sample Request** +To get analytics of all endpoints called using the key `7f3c3ca87376cabe` between October 13th 2020 and October 14th 2020, make the following call: + +```http +GET api/activity/keys/endpoint/7f3c3ca87376cabe/13/10/2020/14/10/2020 HTTP/1.1 +Host: localhost:3000 +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +```json +{ + "data": [ + { + "id": { + "day": 0, + "month": 0, + "year": 0, + "hour": 0, + "code": 0, + "path": "/anything", + "key": "", + "alias": "", + "url": "", + "iso_country": "", + "api_id": "41351a6a94094da05f75146a695a16f6", + "api_name": "" + }, + "hits": 1, + "success": 1, + "error": 0, + "last_hit": "2020-10-13T13:22:49.667+05:30", + "request_time": 0, + "latency": 217, + "upstream_latency": 217, + "max_upstream_latency": 217, + "min_upstream_latency": 217, + "max_latency": 217, + "min_latency": 217 + }, + { + "id": { + "day": 0, + "month": 0, + "year": 0, + "hour": 0, + "code": 0, + "path": "/anything", + "key": "", + 
"alias": "", + "url": "", + "iso_country": "", + "api_id": "1793db2cbb724ad54da582ce3191d383", + "api_name": "" + }, + "hits": 1, + "success": 1, + "error": 0, + "last_hit": "2020-10-13T13:22:20.534+05:30", + "request_time": 568, + "latency": 568, + "upstream_latency": 568, + "max_upstream_latency": 568, + "min_upstream_latency": 568, + "max_latency": 568, + "min_latency": 568 + }, + ], + "pages": 1 +} +``` + + +#### Analytics of OAuth Client +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/activity/oauthid/endpoint/{OAuthClientID}/{startDay}/{startMonth}/{startYear}/{EndDay}/{EndMonth}/{EndYear}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +It returns analytics of the all endpoints called using the given OAuth Client ID. + +**Sample Request** +To get activity of all endpoints which used OAuth client `27b35a9ed46e429eb2361e440cc4005c` between October 13th 2020 and October 14th 2020, make the following call: + +```http +GET /api/activity/oauthid/endpoint/27b35a9ed46e429eb2361e440cc4005c/13/10/2020/14/10/2020 HTTP/1.1 +Host: localhost:3000 +authorization: 7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** +``` +{ + "data": [ + { + "id": { + "day": 0, + "month": 0, + "year": 0, + "hour": 0, + "code": 0, + "path": "/get", + "key": "", + "alias": "", + "url": "", + "iso_country": "", + "api_id": "79fc7cb80df940cc5089772200bd4926", + "api_name": "" + }, + "hits": 2, + "success": 1, + "error": 1, + "last_hit": "2020-10-13T14:48:51.582+05:30", + "request_time": 498, + "latency": 498, + "upstream_latency": 497.5, + "max_upstream_latency": 747, + "min_upstream_latency": 248, + "max_latency": 748, + "min_latency": 248 + }, + { + "id": { + "day": 0, + "month": 0, + "year": 0, + "hour": 0, + "code": 0, + "path": "/post", + "key": "", + "alias": "", + "url": "", + "iso_country": "", + "api_id": "79fc7cb80df940cc5089772200bd4926", + "api_name": "" + }, + "hits": 1, + "success": 1, + 
"error": 0, + "last_hit": "2020-10-13T14:49:31.541+05:30", + "request_time": 0, + "latency": 241, + "upstream_latency": 239, + "max_upstream_latency": 239, + "min_upstream_latency": 239, + "max_latency": 241, + "min_latency": 241 + } + ], + "pages": 1 +} +``` + +### Users API + + + +`USER_ID` is a placeholder for your User ID value. + + + + +#### List Users + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/users` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/users HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "users": [ + { + "api_model": {}, + "first_name": "John", + "last_name": "Smith", + "email_address": "john@jive.ly", + "password": "$2a$10$mRVfrAf72N66anVNhA1KVuYaOwOrXhFzxyg6bwgZemUeVo2MNOpIa", + "org_id": "54b53d3aeba6db5c35000002", + "active": true, + "id": "54b53d4bf25b920f09361526", + "access_key": "0cf5e6c37add465a406f19807c081765", + "user_permissions": { + "IsAdmin": "admin", + "ResetPassword": "admin" + } + }, + { + "api_model": {}, + "first_name": "Test", + "last_name": "User", + "email_address": "banana@test.com", + "password": "", + "org_id": "54b53d3aeba6db5c35000002", + "active": true, + "id": "54bd0ad9ff4329b88985aafb", + "access_key": "f81ee6f0c8f2467d539c132c8a422346", + "user_permissions": { + "user_groups": "read", + "users": "read" + } + } + ], + "pages": 0 +} +``` + +#### Get User + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/users/{USER_ID}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/users/54bd0ad9ff4329b88985aafb HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "api_model": {}, + "first_name": "Test", + "last_name": "User", + 
"email_address": "banana@test.com", + "password": "", + "org_id": "54b53d3aeba6db5c35000002", + "active": true, + "id": "54bd0ad9ff4329b88985aafb", + "access_key": "f81ee6f0c8f2467d539c132c8a422346" +} +``` + +#### Add User + + + +You can add a user via the API without a password by leaving out the `password` field. You then use the [Set User Password](#set-user-password) request to add a password. + + + +You need to have the `users` [Permission object](/api-management/user-management#user-permissions) set to write to use **Add User**. + +If you do set a password, you need to keep a record of it, to enable the password to be reset in the future. + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/users` | +| Method | POST | +| Type | None | +| Body | User Object | +| Param | None | + +**Sample Request** + +```http +POST /api/users HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "first_name": "Jason", + "last_name": "Jasonson", + "email_address": "jason@jasonsonson.com", + "active": true, + "password": "thisisatest", + "user_permissions": { "IsAdmin": "admin" } +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User created", + "Meta": "" +} +``` + +#### Set User Password + +If a user is created with a blank password, you will need to add a password in a second API call to set a password. In this scenario, the `current_password` field is not required. To change an existing password, you need to know the existing password set in **Add User**. + +You need to have the `users` [Permission object](/api-management/user-management#user-permissions) set to **read** to use **Set User Password**. 
+ +| **Property** | **Description** | +| :------------ | :------------------------------------- | +| Resource URL | `/api/users/{USER_ID}/actions/reset` | +| Method | POST | +| Type | None | +| Body | Password Object | +| Param | None | + +**Sample Request** + +```http +POST /api/users/54c25e845d932847067402e2/actions/reset HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "current_password": "12345", + "new_password":"test123456", + "user_permissions": { "IsAdmin": "admin" } +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User password updated", + "Meta": "" +} +``` + +#### Allow Reset Password + +| **Property** | **Description** | +| :------------ | :------------------------------------------------------ | +| Resource URL | `/admin/users/{USER_ID}/actions/allow_reset_passwords`| +| Method | PUT | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** +```http +PUT -H "admin-auth: " http:///admin/users/{USER_ID}/actions/allow_reset_passwords +``` + +**Sample Response** +``` +{ + "Status": "OK", + "Message": "User updated", + "Meta": + { …user object payload …} +} +``` + +#### Disallow Reset Password + +| **Property** | **Description** | +| :------------ | :---------------------------------------------------------- | +| Resource URL | `/admin/users/{USER_ID}/actions/disallow_reset_passwords` | +| Method | PUT | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** +```http +PUT -H "admin-auth: " http:///admin/users/{USER_ID}/actions/disallow_reset_passwords +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User updated", + "Meta": + { …user object payload …} +} +``` + +#### Update User + +You need to have the `users` [Permission object](/api-management/user-management#user-permissions) set to write to use **Update User**. 
+ +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/users/{USER_ID}` | +| Method | PUT | +| Type | None | +| Body | User Object | +| Param | None | + +**Sample Request** + +```http +PUT /api/users/54c25e845d932847067402e2 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "first_name": "Jason", + "last_name": "File", + "email_address": "jason.file@jasonsonson.com", + "active": true, + "user_permissions": { "IsAdmin": "admin" } +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User updated", + "Meta": null +} +``` + +##### Reset User Session + +This call allows you to reset a user's current Dashboard session. + +You need to have the `users` [Permission object](/api-management/user-management#user-permissions) set to write to use this call. + + + +This also resets the user's Dashboard API credentials. + + + +| **Property** | **Description** | +| :------------ | :------------------------------------------ | +| Resource URL | `/api/users/{USER_ID}/actions/key/reset` | +| Method | PUT | +| Type | None | +| Body | `{"userId":"{USER_ID}"}` | +| Param | None | + +**Sample Request** + +```http +PUT /api/users/54c25e845d932847067402e2/actions/key/reset HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +{ + "userId":"{USER_ID}" +} +``` + +**Sample Response** + +```http +{ + "Status": "OK", + "Message": "User session renewed", + "Meta": null +} +``` + +#### Delete User + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/users/{USER_ID}` | +| Method | DELETE | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +DELETE /api/users/54c25e845d932847067402e2 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User deleted", + "Meta": "" +} +``` + 
+### User Groups API + +#### List User Groups + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/usergroups` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/usergroups HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "groups": [ + { + "org_id": "54b53d3aeba6db5c35000002", + "id": "54b53d4bf25b920f09361526", + "name": "Analytics team", + "description": "Only access to analytics pages", + "active": true, + "user_permissions": { "analytics": "read" } + }, + { + "org_id": "54b53d3aeba6db5c35000003", + "id": "54b53d4bf25b920f09361527", + "name": "Certificates team", + "description": "Team to manage certificates", + "active": true, + "user_permissions": { "certificates": "write" } + } + ], + "pages": 0 +} +``` + +#### Get User Group + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/usergroups/{user_group-id}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/usergroups/54bd0ad9ff4329b88985aafb HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "org_id": "54b53d3aeba6db5c35000002", + "id": "54b53d4bf25b920f09361526", + "name": "Certificates team", + "description": "Team to manage certificates", + "active": true, + "user_permissions": { "certificates": "write" } +} +``` + +#### Add User Group + + + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/usergroups` | +| Method | POST | +| Type | None | +| Body | User Object | +| Param | None | + +**Sample Request** + +```http +POST /api/usergroups HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "name": "Logs team", + "description": "Logs team description", + 
"user_permissions": { "logs": "read" } +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User group created", + "Meta": "" +} +``` + + + +#### Update User Group + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/usergroups/{user_group-id}` | +| Method | PUT | +| Type | None | +| Body | User Group Object | +| Param | None | + +**Sample Request** + +```http +PUT /api/usergroups/54c25e845d932847067402e2 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "name": "Certificates Team 2", + "description": "Another certificates team", +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User group updated", + "Meta": null +} +``` + +#### Delete User Group + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `/api/usergroups/{user_group-id}` | +| Method | DELETE | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +DELETE /api/usergroups/54c25e845d932847067402e2 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "User group deleted", + "Meta": "" +} +``` + +### Additional Permissions API + + + +This API helps you to add and delete (CRUD) a list of additional (custom) permissions for your Dashboard users. +Once created, a custom permission will be added to standard list of user permissions. +
+Only Admin Dashboard users are authorized to use this API. +
+ + + + +#### List Additional Permissions + +This API returns by default the initial set of additional permissions defined in your Tyk Dashboard configuration, under [security.additional_permissions](/tyk-dashboard/configuration#securityadditional_permissions). + +Once you update the permissions via the API, they will be stored at organization level. + +| **Property** | **Description** | +| :------------ | :--------------------- | +| Resource URL | `/api/org/permissions`| +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/org/permissions HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "additional_permissions": { + "api_developer": "API Developer", + "api_manager": "API Manager" + } +} +``` + +#### Add/Delete/Update Additional Permission + + + +Whenever you want to add/update/delete an additional permission, just send back the updated list of permissions, through a PUT request to the API. + + + + +| **Property** | **Description** | +| :------------ | :------------------------ | +| Resource URL | `/api/org/permission` | +| Method | PUT | +| Type | None | +| Body | Permissions Object | +| Param | None | + +**Sample Request** + +Let's imagine we have already defined two additional permissions: `api_developer` and `api_manager`. In order to add a new permission to this list, just send +an updated list of permissions by appending the values you want. In this example we will add a `custom_permission` permission. 
+ +```http +PUT /api/org/permissions HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "additional_permissions": { + "api_developer": "API Developer", + "api_manager": "API Manager", + "custom_permission": "Custom Permission" + } +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "Additional Permissions updated in org level", + "Meta": null +} +``` + +### Access Keys Management API + + + +`{api-id}` can either be the internal or external API id. + + + +#### Get a list of Keys + +**Note:** This will not work with a hashed key set. + +| **Property** | **Description** | +| :------------ | :-------------------------- | +| Resource URL | `/api/apis/{api-id}/keys` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request:** + +```http +GET /api/apis/39d2c98be05c424371c600bd8b3e2242/keys HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response:** + +```yalm +{ + "data": { + "keys": [ + "54b53d3aeba6db5c3500000289a8fbc2bbba4ebc4934bb113588c792", + "54b53d3aeba6db5c3500000230459d8568ec4bbf675bda2ff05e9293", + "54b53d3aeba6db5c35000002ec9a2b1aca7b495771273a0895cb3627", + "54b53d3aeba6db5c3500000272d97a10538248e9523ca09e425090b8", + "54b53d3aeba6db5c3500000252b5c56c61ad42fe765101f6d70cf9c6" + ] + }, + "pages": 0 +} +``` + +#### Get a specific key + +| **Property** | **Description** | +| :------------ | :----------------------------------- | +| Resource URL | `/api/apis/{api-id}/keys/{key-id}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/apis/39d2c98be05c424371c600bd8b3e2242/keys/54b53d3aeba6db5c3500000289a8fbc2bbba4ebc4934bb113588c792 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response:** + +``` +{ + "api_model": {}, + "key_id": "54b53d3aeba6db5c3500000289a8fbc2bbba4ebc4934bb113588c792", + "data": { + 
"last_check": 1421674410, + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": 1423684135, + "quota_max": -1, + "quota_renews": 1421164189, + "quota_remaining": -1, + "quota_renewal_rate": 60, + "access_rights": { + "39d2c98be05c424371c600bd8b3e2242": { + "api_name": "Nitrous Test", + "api_id": "39d2c98be05c424371c600bd8b3e2242", + "versions": [ + "Default" + ] + } + }, + "org_id": "54b53d3aeba6db5c35000002", + "oauth_client_id": "", + "basic_auth_data": { + "password": "" + }, + "hmac_enabled": true, + "hmac_string": "" + } +} +``` + + +#### Create a custom key + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/keys/{custom-key-id}` | +| Method | POST | +| Type | None | +| Body | Session Object | +| Param | None | + +**Sample Request** + +```http +POST /api/keys/my-custom-key HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "apply_policies": ["5ecc0b91081ac40001ed261c"], + "org_id" : "5eb06f441fe4c4000147476e", + + // Below gets overwritten by the Policy, required nonetheless + "expires": 0, + "allowance": 0, + "per": 0, + "quota_max": 0, + "rate": 0, + "access_rights": { + "b742100081764ff06b00f75733145614": { + "api_name": "", + "api_id": "b742100081764ff06b00f75733145614", + "versions": [ + "Default" + ] + } + } +} +``` + +You might be wondering why `access_rights` is necessary, as we are adding a security policy and inheriting the access rights from there. That's because of legacy functionality. We need to add any APIs `api_id` to the key of the access_rights map, as well as the `api_id` value of that key. This will all get overwritten by the policy, but we need to add it. + +**Sample Response:** + + +``` +{ + "api_model": {}, + "key_id": "eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImhlbGxvLXdvcmxkIiwiaCI6Im11cm11cjY0In0=", + "data": { + ... + }, + "key_hash": "567b9a5419c3a9ef" +} +``` + +You can now use `my-custom-key` as a key to access the API. 
Furthermore, you can use it to lookup the key in the Dashboard as well as the generated `key_hash` in the response. + +Let's try curling it: + +```curl +$ curl localhost:8080/my-api/users/1 --header "Authorization: my-custom-key" +{ + "response" : "hello world" +} +``` + +#### Generate a key + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/keys` | +| Method | POST | +| Type | None | +| Body | Session Object | +| Param | None | + +**Sample Request** + +```http +POST /api/keys HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "last_check": 0, + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": 0, + "quota_max": 10000, + "quota_renews": 1424543479, + "quota_remaining": 10000, + "quota_renewal_rate": 2520000, + "access_rights": { + "bc2f8cfb7ab241504d9f3574fe407499": { + "api_id": "bc2f8cfb7ab241504d9f3574fe407499", + "api_name": "Test", + "versions": [ + "Default" + ] + } + } +} +``` + +**Sample Response:** + +``` +{ + "api_model": {}, + "key_id": "54b53d3aeba6db5c3500000216d056646b4b4ffe4e54f5b07d658f8a", + "data": { + "last_check": 0, + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": 0, + "quota_max": 10000, + "quota_renews": 1424543479, + "quota_remaining": 10000, + "quota_renewal_rate": 2520000, + "access_rights": { + "bc2f8cfb7ab241504d9f3574fe407499": { + "api_name": "Test", + "api_id": "bc2f8cfb7ab241504d9f3574fe407499", + "versions": [ + "Default" + ] + } + }, + "org_id": "54b53d3aeba6db5c35000002", + "oauth_client_id": "", + "basic_auth_data": { + "password": "" + }, + "hmac_enabled": false, + "hmac_string": "" + } +} +``` + +#### Update a key + +| **Property** | **Description** | +| :------------ | :------------------------------------ | +| Resource URL | `/api/apis/{api-id}/keys/{keyId}` | +| Method | PUT | +| Type | None | +| Body | Session Object | +| Param | None | + +**Sample Request** + +```http +PUT 
/api/apis/39d2c98be05c424371c600bd8b3e2242/keys/54b53d3aeba6db5c3500000272d97a10538248e9523ca09e425090b8 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "last_check": 0, + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": 1422113671, + "quota_max": -1, + "quota_renews": 1421675253, + "quota_remaining": -1, + "quota_renewal_rate": 60, + "access_rights": { + "39d2c98be05c424371c600bd8b3e2242": { + "api_id": "39d2c98be05c424371c600bd8b3e2242", + "api_name": "Nitrous Test", + "versions": [ + "Default" + ] + } + }, + "org_id": "54b53d3aeba6db5c35000002", + "oauth_client_id": "", + "basic_auth_data": { + "password": "" + }, + "hmac_enabled": false, + "hmac_string": "" +} +``` + +**Sample Response:** + +``` +{ + "Status": "OK", + "Message": "Key updated", + "Meta": "" +} +``` + +#### Delete a key + +| **Property** | **Description** | +| :------------ | :--------------------------------- | +| Resource URL | `/api/apis/{api-id}/keys/{keyId}` | +| Method | DELETE | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +DELETE /api/apis/39d2c98be05c424371c600bd8b3e2242/keys/54b53d3aeba6db5c3500000272d97a10538248e9523ca09e425090b8 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response:** + +``` +{ + "Status": "OK", + "Message": "Key deleted succesfully", + "Meta": "" +} +``` + +#### Graphql API + +Presently, the Tyk Dashboard uses the GraphQL API for keys. + +| **Method** | **URL** | **Description** | +| :---------- | :------------- | :--------------------------- | +| POST | `/graphql` | GraphQL query endpoint | +| GET | `/playground` | Dashboard Graphql Playground - where you could see docs and run queries | + + +### Basic Authentication API + +Basic Auth users are essentially a form of API token, just with a customized, pre-set organization-specific ID instead of a generated one. 
To interact with basic auth users, you can use the API Token API calls (list, get delete etc.) + +#### Create a user + +| **Property** | **Description** | +| :------------ | :--------------------------------- | +| Resource URL | `/api/apis/keys/basic/{username}` | +| Method | POST | +| Type | None | +| Body | Session Object | +| Param | None | + +**Sample Request** + +```http +POST /api/apis/keys/basic/test-user HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "last_check": 0, + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": 0, + "quota_max": 10000, + "quota_renews": 1424543479, + "quota_remaining": 10000, + "quota_renewal_rate": 2520000, + "access_rights": { + "bc2f8cfb7ab241504d9f3574fe407499": { + "api_id": "bc2f8cfb7ab241504d9f3574fe407499", + "api_name": "Test", + "versions": [ + "Default" + ] + } + }, + "basic_auth_data": { + "password": "test123" + } +} +``` + +**Sample Response** + +``` +{ + "api_model": {}, + "key_id": "54b53d3aeba6db5c3500000test-user", + "data": { + "last_check": 0, + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": 0, + "quota_max": 10000, + "quota_renews": 1424543479, + "quota_remaining": 10000, + "quota_renewal_rate": 2520000, + "access_rights": { + "bc2f8cfb7ab241504d9f3574fe407499": { + "api_name": "Test", + "api_id": "bc2f8cfb7ab241504d9f3574fe407499", + "versions": [ + "Default" + ] + } + }, + "org_id": "54b53d3aeba6db5c35000002", + "oauth_client_id": "", + "basic_auth_data": { + "password": "" + }, + "hmac_enabled": false, + "hmac_string": "" + } +} +``` + +### OAuth Key Management API + +#### Create a new OAuth2.0 Client + +Any OAuth keys must be generated under an API in the Dashboard. Any POST requests made should contain the API's ID in the URL. 
+ +| **Property** | **Description** | +| :------------ | :---------------------------- | +| Resource URL | `/api/apis/oauth/{{api-id}}` | +| Method | POST | +| Type | JSON | +| Body | Client Object | + + +**Sample Request** + +```curl + curl -vX POST -H "Authorization: {{API Access Credentials}}" \ + -H "Content-Type: application/json" \ + -d '{"redirect_uri": "", "policy_id": "{{policy_id}}"}' http://{{dasboard-hostname}}/api/apis/oauth/{{api-id}} +``` + +**Sample Response** + +```yaml +{ + "client_id": "72083e90e9b044c57e2667d49effff78", + "secret": "YWUxZTM2ODItOTJjYS00MmIyLTQxZGEtZTE0M2MzNmYwMDI2", + "redirect_uri": "", + "policy_id": "57f7b07647e0800001aa2320" +} +``` + +#### List OAuth Clients + +| **Property** | **Description** | +| :------------ | :---------------------------- | +| Resource URL | `/api/apis/oauth/{{api-id}}` | +| Method | GET | +| Type | JSON | +| Body | NONE | + + +**Sample Request** + +```http +curl -vX GET -H "Authorization: {{API Access Credentials}}" \ + -H "Content-Type: application/json" \ + http://{{dashboard-hostname}}/api/apis/oauth/{{api-id}} +``` + +**Sample Response** + +```yaml +{ + "apps": [ + { + "client_id": "7dce7fc297424fd65596b51c214666a4", + "secret":"Yzg0ZDRjZTctMzUxNy00YmQ5LTRkM2UtMDdmODQ3MTNjNWM5", + "redirect_uri": "/cats", + "policy_id": "57f7b07647e0800001aa2320" + }, + { + "client_id": "72083e90e9b044c57e2667d49effff78", + "secret": "YWUxZTM2ODItOTJjYS00MmIyLTQxZGEtZTE0M2MzNmYwMDI2", + "redirect_uri": "", + "policy_id": "57f7b07647e0800001aa2320" + } + ], + "pages":0 +} +``` + +#### Get an OAuth2.0 Client + +| **Property** | **Description** | +| :------------ | :------------------------------------------ | +| Resource URL | `/api/apis/oauth/{{api-id}}/{{client_id}}` | +| Method | GET | +| Type | JSON | +| Body | NONE | + + +**Sample Request** + +```curl +curl -vX GET -H "Authorization: {{API Access Credentials}}" \ + -H "Content-Type: application/json" \ + 
http://localhost:3000/api/apis/oauth/{{api-id}}/{{client_id}} +``` + +**Sample Response** + +```yaml +{ + "client_id": "7dce7fc297424fd65596b51c214666a4", + "secret": "Yzg0ZDRjZTctMzUxNy00YmQ5LTRkM2UtMDdmODQ3MTNjNWM5", + "redirect_uri": "/cats", + "policy_id": "57f7b07647e0800001aa2320" +} +``` + +#### Delete OAuth Client + +You can delete an OAuth client using a simple DELETE method. Please note that tokens issued with the client ID will still be valid until they expire. + +| **Property** | **Description** | +| :------------ | :------------------------------------------ | +| Resource URL | `/api/apis/oauth/{{api-id}}/{{client_id}}` | +| Method | DELETE | +| Type | JSON | +| Body | NONE | + + +**Sample Request** + +```curl +curl -vX DELETE -H "Authorization: {{API Access Credentials}}" \ + -H "Content-Type: application/json" \ + http://{{dashboard-hostname}}/api/apis/oauth/{{api-id}}/{{client_id}} +``` + +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "OAuth Client deleted successfully", + "Meta": null +} +``` + +#### Retrieve All Current Tokens for Specified OAuth2.0 Client + +This endpoint allows you to retrieve a list of all current tokens and their expiry date for a provided API ID and OAuth-client ID in the following format. This endpoint will work only for newly created tokens. + + + +This option is available from v2.6.0 onwards. 
+ + + + +| **Property**  | **Description**                                        | +| :------------ | :---------------------------------------------------- | +| Resource URL  | `/api/apis/oauth/{apiID}/{oauthClientId}/tokens`       | +| Method        | GET                                                    | +| Type          |                                                        | +| Body          | NONE                                                   | + +**Sample Request** +```http +GET /api/apis/oauth/528a67c1ac9940964f9a41ae79235fcc/25348e8cf157409b52e39357fd9578f1/tokens HTTP/1.1 +Host: localhost:3000 +Authorization: {{API Access Credentials}} +Cache-Control: no-cache +``` + +**Sample Response** +```yaml +[ + { + "code": "5a7d110be6355b0c071cc339327563cb45174ae387f52f87a80d2496", + "expires": 1518158407 + }, + { + "code": "5a7d110be6355b0c071cc33988884222b0cf436eba7979c6c51d6dbd", + "expires": 1518158594 + }, + { + "code": "5a7d110be6355b0c071cc33990bac8b5261041c5a7d585bff291fec4", + "expires": 1518158638 + }, + { + "code": "5a7d110be6355b0c071cc339a66afe75521f49388065a106ef45af54", + "expires": 1518159792 + } +] +``` + +You can control how long you want to store expired tokens in this list using `oauth_token_expired_retain_period` which specifies the retention period for expired tokens stored in Redis. By default, expired tokens are not removed. See [here](/tyk-oss-gateway/configuration#oauth_token_expired_retain_period) for more details. 
+ +#### Revoke a Single OAuth Client Token + +| **Property** | **Description** | +| :------------ | :---------------------------------------------- | +| Resource URL | `/api/apis/oauth/{oauthClientId}/revoke` | +| Method | POST | +| Type | JSON | +| Body | Client Object | +| Param | None | + + +**Sample Request** + +```http +POST /api/apis/oauth/411f0800957c4a3e81fe181141dbc22a/revoke +Host: localhost +Authorization 64c8e662f6924c4f55e94a873d75e44d +Body: { + "token": "eyJvcmciOiI1ZTIwOTFjNGQ0YWVmY2U2MGMwNGZiOTIiLCJpZCI6IjIyODQ1NmFjNmJlMjRiMzI5MTIyOTdlODQ5NTc4NjJhIiwiaCI6Im11cm11cjY0In0=", + "token_type_hint": "access_token" +} +``` +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "token revoked successfully", + "Meta": null +} +``` +#### Revoke all OAuth Client Tokens + +| **Property** | **Description** | +| :------------ | :---------------------------------------------- | +| Resource URL | `/api/apis/oauth/{oauthClientId}/revoke_all` | +| Method | POST | +| Type | JSON | +| Body | Client Object | +| Param | None | + +**Sample Request** + +```http +POST /api/apis/oauth/411f0800957c4a3e81fe181141dbc22a/revoke_all +Host: localhost +Authorization: 64c8e662f6924c4f55e94a873d75e44d +Body: { + "client_secret":"MzUyNDliNzItMDhlNy00MzM3LTk1NWUtMWQyODMyMjkwZTc0" +} +``` + +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "tokens revoked successfully", + "Meta": null +} +``` + +#### OAuth2.0 Authorization Code + +This endpoint is used in the [Authorization Code Grant](/api-management/authentication/oauth-2#using-the-authorization-code-grant) flow, generating an authorization code that can be used by the client to request an access token. 
+ +| **Property** | **Description** | +| :------------ | :---------------------------------------------- | +| Resource URL | `/api/apis/oauth/{{api_id}}/authorize-client/` | +| Method | POST | +| Type | Form-Encoded | +| Body | Fields (see below) | + +* `api_id`: Unlike the other requests on this page, this must be the `api_id` value and **NOT** the API's `id` value. +* `response_type`: Should be provided by requesting client as part of authorization request, this should be either `code` or `token` depending on the methods you have specified for the API. +* `client_id`: Should be provided by requesting client as part of authorization request. The Client ID that is making the request. +* `redirect_uri`: Should be provided by requesting client as part of authorization request. Must match with the record stored with Tyk. +* `key_rules`: A string representation of a Session Object (form-encoded). *This should be provided by your application in order to apply any quotas or rules to the key.* + +Note that in the following example, the `policy_id` isn't included in the request as these are optional. OAuth2.0 Flow also supports callbacks which can be added to the `key_rules` in the payload in requests that don't include the `policy_id`. 
+ + +**Sample Request** + +```curl +curl -vX POST -H "Authorization: {{API Access Credentials}}" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d 'response_type=code&client_id={{client_id}}&redirect_uri=http%3A%2F%2Foauth.com%2Fredirect&key_rules=%7B+++++%22allowance%22%3A+999%2C+++++%22rate%22%3A+1000%2C+++++%22per%22%3A+60%2C+++++%22expires%22%3A+0%2C+++++%22quota_max%22%3A+-1%2C+++++%22quota_renews%22%3A+1406121006%2C+++++%22quota_remaining%22%3A+0%2C+++++%22quota_renewal_rate%22%3A+60%2C+++++%22access_rights%22%3A+%7B+++++++++%22528a67c1ac9940964f9a41ae79235fcc%22%3A+%7B+++++++++++++%22api_name%22%3A+%22{{api_name}}%22%2C+++++++++++++%22api_id%22%3A+%{{api_id}}%22%2C+++++++++++++%22versions%22%3A+%5B+++++++++++++++++%22Default%22+++++++++++++%5D+++++++++%7D+++++%7D%2C+++++%22org_id%22%3A+%22{{org_id}}%22+%7D' +http://{{dashboard-hostname}}/api/apis/oauth/{{api_id}}/authorize-client +``` + +**Sample Response** + +``` +{ + "code": "MWY0ZDRkMzktOTYwNi00NDRiLTk2YmQtOWQxOGQ3Mjc5Yzdk", + "redirect_to": "http://localhost:3000/oauth-redirect/?code=MWY0ZDRkMzktOTYwNi00NDRiLTk2YmQtOWQxOGQ3Mjc5Yzdk" +} +``` + +### Single Sign On API + + + +This functionality is available from [v2.9.0](/developer-support/release-notes/archived#single-sign-on-for-the-tyk-saas). If you have an older version please using the [admin api](/api-management/dashboard-configuration#single-sign-on-api-1) + + + + +The Dashboard SSO API allows you to implement custom authentication schemes for the Dashboard and Portal. +Our Tyk Identity Broker (TIB) internally also uses this API. + +#### Generate authentication token + +The Dashboard exposes the `/api/sso` Dashboard API which allows you to generate a temporary authentication token, valid for 60 seconds. + +You should provide JSON payload with the following data: + +* `ForSection` - scope with possible values of `"dashboard"` or `"portal"` only. 
+* `OrgID` - organization id +* `EmailAddress` - user email +* `GroupID` - user group id ( it is the mongo id and you can can find it in the url when opening a user group via Tyk- Dashboard UI or if you call Tyk-Dashboard REST API `/api/usergroups` ) + + +| **Property** | **Description** | +| :------------ | :---------------------------- | +| Resource URL | `/api/sso` | +| Method | POST | +| Body | `{"ForSection":"", "OrgID": "", "EmailAddress": "", "GroupID": ""}` | + +**Sample Request** + +```http +POST /api/sso HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "ForSection": "dashboard", + "OrgID": "588b4f0bb275ff0001cc7471", + "EmailAddress": "name@somewhere.com", + "GroupID": "" +} +``` + +**Sample Response:** +```json +{ + "Status": "OK", + "Message": "SSO Nonce created", + "Meta": "YTNiOGUzZjctYWZkYi00OTNhLTYwODItZTAzMDI3MjM0OTEw" +} +``` + +See [Single Sign On](/api-management/external-service-integration#single-sign-on-sso) documentation for how to use this token and more details. 
+ +### Web Hooks API + +#### List web hooks + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/hooks` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/hooks HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "hooks": [ + { + "api_model": {}, + "id": "54be6c0beba6db07a6000002", + "org_id": "54b53d3aeba6db5c35000002", + "name": "Test Post", + "method": "POST", + "target_path": "http://httpbin.org/post", + "template_path": "", + "header_map": { + "x-tyk-test": "123456" + }, + "event_timeout": 0 + } + ], + "pages": 0 +} +``` + +#### Get single web hook + +| **Property** | **Description** | +| :------------ | :---------------------- | +| Resource URL | `/api/hooks/{hook-id}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /api/hooks/54be6c0beba6db07a6000002 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "api_model": {}, + "id": "54be6c0beba6db07a6000002", + "org_id": "54b53d3aeba6db5c35000002", + "name": "Test Post", + "method": "POST", + "target_path": "http://httpbin.org/post", + "template_path": "", + "header_map": { + "x-tyk-test": "123456" + }, + "event_timeout": 0 +} +``` + +#### Add hook + +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/api/hooks` | +| Method | POST | +| Type | None | +| Body | Hook object | +| Param | None | + +**Sample Request** + +```http +POST /api/hooks HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "name": "New Post Test", + "method": "POST", + "target_path": "http://httpbin.org/post", + "header_map": { + "x-test": "y-answer" + } +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "Webhook created", + "Meta": "" +} 
+``` + +#### Update hook + +| **Property** | **Description** | +| :------------ | :---------------------- | +| Resource URL | `/api/hooks/{hook-id}` | +| Method | PUT | +| Type | None | +| Body | Hook object | +| Param | None | + +**Sample Request** + +```http +PUT /api/hooks/54c2617aeba6db1edc000003 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "api_model": {}, + "id": "54c2617aeba6db1edc000003", + "org_id": "54b53d3aeba6db5c35000002", + "name": "New Post Test", + "method": "PUT", + "target_path": "http://httpbin.org/post", + "template_path": "", + "header_map": { + "x-test": "y-answer" + }, + "event_timeout": 0 +} +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "Webhook updated", + "Meta": "" +} +``` + +#### Delete web hook + +| **Property** | **Description** | +| :------------ | :------------------------- | +| Resource URL | `/api/hooks/{hook-id}` | +| Method | DELETE | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +DELETE /api/hooks/54c2617aeba6db1edc000003 HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 +``` + +**Sample Response** + +``` +{ + "Status": "OK", + "Message": "Webhook deleted", + "Meta": "" +} +``` + +### Open Policy Agent API + + + +The Open Policy Agent API helps you to manage (CRUD) the OPA (Open Policy Agent) rules that are being applied to the Tyk Dashboard. You can also change the OPA settings, such as to enable/disable it or enable/disable the debug mode. + +Only Admin role Dashboard users are authorized to use it. + + + +For more information on how to configure OPA see [Open Policy Agent](/api-management/dashboard-configuration#extend-permissions-using-open-policy-agent-opa). 
+#### List OPA rules and settings
+
+This endpoint returns by default the initial set of OPA rules defined in your Tyk Dashboard, which are located in [schema/dashboard.rego](/api-management/dashboard-configuration#dashboard-opa-rules) (accessible in Self-Managed installations).
+
+Once you update the rules via the API, the OPA rules will be stored at the organization level.
+
+| **Property** | **Description** |
+| :------------ | :--------------------- |
+| Resource URL | `/api/org/opa` |
+| Method | GET |
+| Type | None |
+| Body | None |
+| Param | None |
+
+**Sample Request**
+
+```http
+GET /api/org/opa HTTP/1.1
+Host: localhost:3000
+authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8
+```
+
+**Sample Response**
+
+```
+{
+  "open_policy": {
+    "enabled": true,
+    "rules": "default hello = false\r\n\r\nhello {\r\n m := input.message\r\n m == \"world\"\r\n}"
+  }
+}
+```
+#### Update OPA rules and settings
+
+
+
+Whenever you want to update the OPA rules or their settings, send the updated value of the OPA rules or changed values for the settings (`enabled`) via a PUT request to the `/api/org/opa` endpoint.
+
+
+
+
+| **Property** | **Description** |
+| :------------ | :------------------------ |
+| Resource URL | `/api/org/opa` |
+| Method | PUT |
+| Type | None |
+| Body | Permissions Object |
+| Param | None |
+
+**Sample Request**
+
+```http
+PUT /api/org/opa HTTP/1.1
+Host: localhost:3000
+authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8
+```
+
+```
+{
+  "open_policy": {
+    "enabled": false,
+    "rules": "default hello = false\r\n\r\nhello {\r\n m := input.message\r\n m == \"world\"\r\n}"
+  }
+}
+```
+
+**Sample Response**
+
+```
+{
+  "Status": "OK",
+  "Message": "OPA rules has been updated on org level",
+  "Meta": null
+}
+```
+
+## Dashboard Admin API Resources and Usage
+
+### API Usage Instructions
+
+
+
+**Important Note on Spelling:**
+
+While our documentation now uses American English [(en-us)](https://www.andiamo.co.uk/resources/iso-language-codes/), the product itself, including all user interfaces, configuration
+fields, environment variables, and APIs, continues to use British English spellings. When interacting with the product,
+please continue using the British English (en-gb) spellings as they appear in the interface and API. This change does not affect
+how you use the product; all functionality remains the same.
+
+
+**Example:** The API endpoint `/organisations` as shown throughout this page uses British spelling (with an 's' not 'z').
+In all other instances, such as when describing or referring to this object in the documentation, we will use the
+American spelling "organization" with a 'z'.
+
+
+
+
+
+In a production environment, you must change the default `admin_secret` in the `tyk_analytics.conf` file. Admin APIs use this value for authentication, and you should set it in the `admin-auth` header while sending the request.
+
+
+ + +For the official Tyk Dashboard Admin API Reference, please visit our [API Documentation](/dashboard-admin-api). + +### Organizations API + +#### Retrieve a single Organization + +| **Property** | **Description** | +| :------------ | :--------------------------------- | +| Resource URL | `/admin/organisations/{org-id}` | +| Method | GET | +| Type | None | +| Body | Organization Object | +| Param | None | + +**Sample Request** + +```http +GET /admin/organisations/{ORG_ID} HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 +``` + +**Sample Response** + +```json +{ + "id": "5cc03283d07e7f00019404b3", + "owner_name": "TestOrg5 Ltd.", + "owner_slug": "testorg", + "cname_enabled": true, + "cname": "www.tyk-portal-test.com", + "apis": [ + { + "api_human_name": "First API #Test", + "api_id": "5508bd9429434d5768c423a04db259ea" + } + ], + "developer_quota": 0, + "developer_count": 0, + "event_options": {}, + "hybrid_enabled": false, + "ui": { + "languages": {}, + "hide_help": false, + "default_lang": "", + "login_page": {}, + "nav": {}, + "uptime": {}, + "portal_section": {}, + "designer": {}, + "dont_show_admin_sockets": false, + "dont_allow_license_management": false, + "dont_allow_license_management_view": false, + "cloud": false + }, + "org_options_meta": {} +} +``` + + +#### Retrieve all Organizations + +| **Property** | **Description** | +| :------------ | :------------------------- | +| Resource URL | `/admin/organisations/' | +| Method | GET | +| Type | None | +| Body | Organization Object | +| Param | None | + +**Sample Request** + +```http +GET /admin/organisations/ HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 +``` + +**Sample Response** + +```json +{ + "organisations": [ + { + "id": "5cc03283d07e7f00019404b3", + "owner_name": "TestOrg5 Ltd.", + "owner_slug": "testorg", + "cname_enabled": true, + "cname": "www.tyk-portal-test.com", + "apis": [ + { + "api_human_name": "First API #Test", + "api_id": "5508bd9429434d5768c423a04db259ea" + } + ], + 
"developer_quota": 0, + "developer_count": 0, + "event_options": {}, + "hybrid_enabled": false, + "ui": { + "languages": {}, + "hide_help": false, + "default_lang": "", + "login_page": {}, + "nav": {}, + "uptime": {}, + "portal_section": {}, + "designer": {}, + "dont_show_admin_sockets": false, + "dont_allow_license_management": false, + "dont_allow_license_management_view": false, + "cloud": false + }, + "org_options_meta": {} + }, + { + "id": "5ccae84aa402ce00018b5435", + "owner_name": "Jively", + "owner_slug": "", + "cname_enabled": true, + "cname": "jive.ly", + "apis": [], + "developer_quota": 0, + "developer_count": 0, + "event_options": {}, + "hybrid_enabled": false, + "ui": { + "languages": {}, + "hide_help": false, + "default_lang": "", + "login_page": {}, + "nav": {}, + "uptime": {}, + "portal_section": {}, + "designer": {}, + "dont_show_admin_sockets": false, + "dont_allow_license_management": false, + "dont_allow_license_management_view": false, + "cloud": false + }, + "org_options_meta": {} + } + ], + "pages": 0 +} +``` + +#### Create an Organization + +| **Property** | **Description** | +| :------------ | :------------------------- | +| Resource URL | `/admin/organisations/` | +| Method | POST | +| Type | None | +| Body | Organization Object | +| Param | None | + +**Sample Request** + +```http +POST /admin/organisations/ HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "owner_name": "Jively", + "cname": "jive.ly", + "cname_enabled": true +} +``` + +**Sample response** + +```json +{ + "Status":"OK", + "Message":"Org created", + "Meta":"54b53d3aeba6db5c35000002" +} +``` + +#### Update an Organization + +| **Property** | **Description** | +| :------------ | :----------------------------- | +| Resource URL | `/admin/organisations/{id}` | +| Method | PUT | +| Type | None | +| Body | Organization Object | +| Param | None | + +**Sample Request** + +```http +PUT /admin/organisations/54b53d3aeba6db5c35000002 HTTP/1.1 +Host: localhost:3000 +admin-auth: 
12345 + +{ + "owner_name": "Jively", + "cname": "jive.ly", + "cname_enabled": true +} +``` + +**Sample Response** + +```json +{ + "Status":"OK", + "Message":"Org updated", + "Meta":"" +} +``` + +#### Delete an Organization + +| **Property** | **Description** | +| :------------ | :----------------------------- | +| Resource URL | `/admin/organisations/{id}` | +| Method | DELETE | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +DELETE /admin/organisations/54b53d3aeba6db5c35000002 HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 +``` + +**Sample Response** + +```json +{ + "Status":"OK", + "Message":"Org deleted", + "Meta":"" +} +``` + +### Users API + +#### Get User + +| **Property** | **Description** | +| :------------ | :------------------------- | +| Resource URL | `/admin/users/{USER_ID}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /admin/users/54bd0ad9ff4329b88985aafb HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 +``` + +**Sample Response** + + +```json +{ + "api_model": {}, + "first_name": "Test", + "last_name": "User", + "email_address": "banana@test.com", + "password": "", + "org_id": "54b53d3aeba6db5c35000002", + "active": true, + "id": "54bd0ad9ff4329b88985aafb", + "access_key": "f81ee6f0c8f2467d539c132c8a422346" +} +``` + +#### Add user + +When you add a new user, they are created without a password being set. After adding a user, you need to use the [Set Password](#set-user-password) call to set a password using the `user-id` created. 
+ +| **Property** | **Description** | +| :------------ | :--------------- | +| Resource URL | `/admin/users` | +| Method | POST | +| Type | None | +| Body | User Object | +| Param | None | + +**Sample Request** + +```http +POST /admin/users HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "org_id": "5d15d3068ba30a0001621bfe", + "first_name": "Jason", + "last_name": "Jasonson", + "email_address": "jason@jasonsonson.com", + "active": true, + "user_permissions": { "IsAdmin": "admin" } +} +``` + + + +You can also create a user without an `org_id`. This will create a "Super User", who has global access to all APIs, Policies, etc, for all organizations created within Tyk. + + + + +**Sample Response** + + +``` +{ + "Status": "OK", + "Message": "e5485fa02e12425974e1220e1636e4d0", + "Meta": { + "api_model": {}, + "first_name": "Jason", + "last_name": "user", + "email_address": "jason@jasonsonson.com", + "org_id": "", + "active": true, + "id": "5d55378edd4b9e9c308e87da", + "access_key": "e5485fa02e12425974e1220e1636e4d0", + "user_permissions": { + "IsAdmin": "admin" + }, + "group_id": "", + "password_max_days": 0, + "password_updated": "0001-01-01T00:00:00Z", + "PWHistory": [], + "created_at": "2019-08-15T10:44:30.784Z" + } +} +``` + + +#### Update User + +You need to have the `users` [Permission object](/api-management/user-management#user-permissions) set to write to use **Update User**. 
+ +| **Property** | **Description** | +| :------------ | :------------------------ | +| Resource URL | `/admin/users/{USER_ID}` | +| Method | PUT | +| Type | None | +| Body | User Object | +| Param | None | + + +**Sample Request** + +```http +PUT /admin/users/54c25e845d932847067402e2 HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "access_key": "3a8c1cea90af485575bb5455c2e9fb68", + "first_name": "Jason", + "last_name": "File", + "email_address": "jason.file@jasonsonson.com", + "active": true, + "password": "plaintext_password", + "user_permissions": { "IsAdmin": "admin" } +} +``` + + + +If you are modifying a user password, you will need to include an access_key in the body of your request. This can be obtained from doing a GET to the same Resource URL. + + + +**Sample Response** + + +```json +{ + "Status": "OK", + "Message": "User updated", + "Meta": "" +} +``` + +### Single Sign On API + +The Dashboard Admin SSO API endpoint allows you to implement custom authentication schemes for the Dashboard and Portal. Our Tyk Identity Broker (TIB) internally also uses this API. See [Single Sign On](/api-management/external-service-integration#single-sign-on-sso) for more details. + +#### Generate authentication token + +The Dashboard exposes the `/admin/sso` Admin API which allows you to generate a temporary authentication token, valid for 60 seconds. + +You should provide JSON payload with the following data: + +* `ForSection` - scope with possible values of `"dashboard"` or `"portal"` +* `OrgID` - with your organization id. 
+* `GroupID` - the group id +* `EmailAddress` - user email + + +| **Property** | **Description** | +| :------------ | :---------------------------- | +| Resource URL | `/admin/sso` | +| Method | POST | +| Body | `{"ForSection":"", "OrgID": "", "GroupID": ""}` | + +**Sample Request** + +```http +POST /admin/sso HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "ForSection": "dashboard", + "OrgID": "588b4f0bb275ff0001cc7471", + "EmailAddress": "name@somewhere.com", + "GroupID": "" +} +``` + +**Sample Response:** + +```json +{ + "Status": "OK", + "Message": "SSO Nonce created", + "Meta": "YTNiOGUzZjctYWZkYi00OTNhLTYwODItZTAzMDI3MjM0OTEw" +} +``` + +See [Single Sign On](/api-management/external-service-integration#single-sign-on-sso) documentation for how to use this token and more details. + +### URL Reload API + +Since the Dashboard can have multiple URLs associated with it. It is possible to force a URL reload by calling an API endpoint of the Dashboard API. + +#### Reload the Dashboard URLs + +| **Property** | **Description** | +| :------------ | :---------------------- | +| Resource URL | `/admin/system/reload` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /admin/system/reload HTTP/1.1 +Host: localhost:3000 +admin-auth:12345 +``` + +**Sample Response** + +```json +{ + "status": "ok" +} +``` + +### Export Assets API + +To make Tyk installations more portable, the Export API enables you to export key configuration objects required to back-up and re-deploy a basic Tyk Pro installation. + + + +To enable this feature, the minimum required versions for the Gateway and Dashboard are v2.3 and v1.3.1.2, respectively. + + + +#### Export Organizations + +The organization object is the most fundamental object in a Tyk setup, all other ownership properties hang off the relationship between an organization and its APIs, Policies and API Tokens. 
+ +| **Property** | **Description** | +| :------------ | :------------------------------- | +| Resource URL | `/admin/organisations/{ORG-ID}` | +| Method | GET | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +GET /admin/organisations/54bd0ad9ff4329b88985aafb HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 +``` + +**Sample Response** + + +```json +{ + "id": "53ac07777cbb8c2d53000002", + "owner_name": "Test", + "owner_slug": "test", + "cname_enabled": true, + "cname": "my.domain.com", + "apis": [{ + "api_human_name": "API 2", + "api_id": "5fa2db834e07444f760b7ceb314209fb" + }, { + "api_human_name": "API 1", + "api_id": "7a6ddeca9244448a4233866938a0d6e2" + }, { + "api_human_name": "API 3", + "api_id": "109eacaa50b24b64651a1d4dce8ec385" + }], + "developer_quota": 123, + "developer_count": 21, + "event_options": { + "key_event": { + "webhook": "", + "email": "", + "redis": true + }, + "key_request_event": { + "webhook": "", + "email": "", + "redis": false + } + }, + "hybrid_enabled": false, + "ui": { + "languages": {}, + "hide_help": false, + "default_lang": "", + "login_page": {}, + "nav": {}, + "uptime": {}, + "portal_section": {}, + "designer": {}, + "dont_show_admin_sockets": false, + "dont_allow_license_management": false, + "dont_allow_license_management_view": false + } +} +``` + +#### Export APIs and Policies + +To export APIs and Policies you should use the standard `GET APIS` endpoint and `GET POLICIES` list endpoints. The output from these endpoints can be used by the [Import API](#import-assets-api). + +### Import Assets API + +The import API enables you to add *Organizations*, *APIs* and *Policies* back into a Tyk installation while retaining their base IDs so that they work together. + + + +To enable this feature, the minimum required versions for the Gateway and Dashboard are v2.3 and v1.3.1.2, respectively. 
+ + + +#### Import Organization + +The [Organization object](#organizations) is the most fundamental object in a Tyk setup, all other ownership properties hang off the relationship between an Organization and its APIs, Policies and API Tokens. + +| **Property** | **Description** | +| :------------ | :---------------------------- | +| Resource URL | `admin/organisations/import` | +| Method | POST | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +POST /admin/organisations/import HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "id": "53ac07777cbb8c2d53000002", + "owner_name": "Test", + "owner_slug": "test", + "cname_enabled": true, + "cname": "my.domain.com", + "apis": [{ + "api_human_name": "API 2", + "api_id": "5fa2db834e07444f760b7ceb314209fb" + }, { + "api_human_name": "API 1", + "api_id": "7a6ddeca9244448a4233866938a0d6e2" + }, { + "api_human_name": "API 3", + "api_id": "109eacaa50b24b64651a1d4dce8ec385" + }], + "developer_quota": 123, + "developer_count": 21, + "event_options": { + "key_event": { + "webhook": "", + "email": "", + "redis": true + }, + "key_request_event": { + "webhook": "", + "email": "", + "redis": false + } + }, + "hybrid_enabled": false, + "ui": { + "languages": {}, + "hide_help": false, + "default_lang": "", + "login_page": {}, + "nav": {}, + "uptime": {}, + "portal_section": {}, + "designer": {}, + "dont_show_admin_sockets": false, + "dont_allow_license_management": false, + "dont_allow_license_management_view": false + } +} +``` + +#### Import APIs + +The import APIs operates on *lists* of APIs. 
+ +| **Property** | **Description** | +| :------------ | :------------------- | +| Resource URL | `admin/apis/import` | +| Method | POST | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +POST /admin/apis/import HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "apis": [{ + "api_model": {}, + "api_definition": {...}, + "hook_references": [], + "is_site": false, + "sort_by": 0 +}, { + "api_model": {}, + "api_definition": {...}, + "hook_references": [], + "is_site": false, + "sort_by": 0 +}] +} +``` + +#### Import Policies + +The import Policies operates on *lists* of Policies. + +| **Property** | **Description** | +| :------------ | :----------------------- | +| Resource URL | `admin/policies/import` | +| Method | POST | +| Type | None | +| Body | None | +| Param | None | + +**Sample Request** + +```http +POST /admin/policies/import HTTP/1.1 +Host: localhost:3000 +admin-auth: 12345 + +{ + "Data": [{ + "_id": "57ed12fc30c55e6b890d37d8", + "access_rights": { + "5fa2db834e07444f760b7ceb314209fb": { + "allowed_urls": [], + "api_id": "5fa2db834e07444f760b7ceb314209fb", + "api_name": "New API 1", + "versions": ["Default"] + }, + "7a6ddeca9244448a4233866938a0d6e2": { + "allowed_urls": [], + "api_id": "7a6ddeca9244448a4233866938a0d6e2", + "api_name": "API1", + "versions": ["Default"] + } + }, + "active": true, + "date_created": "0001-01-01T00:00:00Z", + "hmac_enabled": false, + "is_inactive": false, + "key_expires_in": 0, + "last_updated": "1478791603", + "name": "Default", + "org_id": "53ac07777cbb8c2d53000002", + "partitions": { + "acl": false, + "quota": false, + "rate_limit": false + }, + "per": 60, + "quota_max": -1, + "quota_renewal_rate": 3600, + "rate": 1000, + "tags": [] + }, { + "_id": "5824343b30c55e52d5e6cfde", + "access_rights": { + "7a6ddeca9244448a4233866938a0d6e2": { + "allowed_urls": [], + "api_id": "7a6ddeca9244448a4233866938a0d6e2", + "api_name": "API 1", + "versions": ["Default"] + } + }, + "active": true, + 
"date_created": "0001-01-01T00:00:00Z", + "hmac_enabled": false, + "is_inactive": false, + "key_expires_in": 0, + "last_updated": "1478791761", + "name": "Test Policy", + "org_id": "53ac07777cbb8c2d53000002", + "partitions": { + "acl": false, + "quota": false, + "rate_limit": false + }, + "per": 1, + "quota_max": 100, + "quota_renewal_rate": 90000, + "rate": 10, + "tags": [] + }, { + "_id": "58172a2330c55e22afcd59af", + "access_rights": { + "109eacaa50b24b64651a1d4dce8ec385": { + "allowed_urls": [], + "api_id": "109eacaa50b24b64651a1d4dce8ec385", + "api_name": "API 3", + "versions": ["Default"] + }, + "5fa2db834e07444f760b7ceb314209fb": { + "allowed_urls": [], + "api_id": "5fa2db834e07444f760b7ceb314209fb", + "api_name": "API 2", + "versions": ["Default"] + }, + "7a6ddeca9244448a4233866938a0d6e2": { + "allowed_urls": [], + "api_id": "7a6ddeca9244448a4233866938a0d6e2", + "api_name": "API 1", + "versions": ["Default"] + }, + "d023576f836a4e407153009e8d95cf73": { + "allowed_urls": [], + "api_id": "d023576f836a4e407153009e8d95cf73", + "api_name": "Test API", + "versions": ["Default"] + } + }, + "active": true, + "date_created": "0001-01-01T00:00:00Z", + "hmac_enabled": false, + "is_inactive": false, + "key_expires_in": 2592000, + "last_updated": "1477913123", + "name": "Big Policy", + "org_id": "53ac07777cbb8c2d53000002", + "partitions": { + "acl": false, + "quota": false, + "rate_limit": false + }, + "per": 1, + "quota_max": 6000, + "quota_renewal_rate": 3600, + "rate": 10, + "tags": ["TEST-1", "TEST-2"] +}], + "Pages": 0 +} +``` + +## Exploring API Endpoint Designer + +### Classic APIs + +Tyk Dashboard's Endpoint Designer provides a graphical environment for the creation and update of your Tyk Classic APIs. + +The Endpoint Designer allows to configure all elements of your Tyk Classic API and consists of several tabs, plus a **Raw Definition** view which allows you to directly edit the Tyk Classic API Definition (in JSON format). 
Note that + +### Core Settings + +The Tyk Classic Endpoint Designer - Core Settings tab + +The **Core Settings** tab provides access to configure basic settings for the API: +- [Detailed logging](/api-management/logs-metrics#capturing-detailed-logs) +- API Settings including + - Listen path + - [API Categories](#governance-using-api-categories) +- Upstream settings including + - Upstream service (target) URL + - [Service Discovery](/planning-for-production/ensure-high-availability/service-discovery) +- [API Ownership](/api-management/user-management#api-ownership) +- [API level rate limiting](/api-management/rate-limit#configuring-the-rate-limiter-at-the-api-level) +- [Authentication](/api-management/client-authentication) + +### Versions + +The Tyk Classic Endpoint Designer - Versions tab + +The **Versions** tab allows you to create and manage [API versioning](/api-management/gateway-config-tyk-classic#tyk-classic-api-versioning) for the API. + +At the top of the Endpoint Designer, you can see which version you are currently editing. If you have more than one option, selecting it from the drop-down will load its endpoint configuration into the editor. + +### Endpoint Designer + +The Tyk Classic Endpoint Designer - Endpoint Designer tab + +The **Endpoint Designer** is where you can define endpoints for your API so that you can enable and configure Tyk middleware to [perform checks and transformations](/api-management/traffic-transformation) on the API traffic. + +In some cases, you will want to set global settings that affect all paths that are managed by Tyk. The **Global Version Settings** section will enable you to configure API-level [request](/api-management/traffic-transformation/request-headers#tyk-classic-api) and [response](/api-management/traffic-transformation/request-headers#tyk-classic-api) header transformation. 
+ +### Advanced Options + +The Tyk Classic Endpoint Designer - Advanced Options tab + +The **Advanced Options** tab is where you can configure Tyk's other powerful features including: +- Upstream certificate management +- [API-level caching](/api-management/response-caching#configuring-the-cache-via-the-dashboard) including a button to invalidate (flush) the cache for the API +- [CORS](/api-management/gateway-config-tyk-classic#cross-origin-resource-sharing-cors) +- Add custom attributes to the API definition as *config data* that can be accessed by middleware +- Enable [context variables](/api-management/traffic-transformation/request-context-variables) so that they are extracted from requests and made available to middleware +- Manage *segment tags* if you are working with [sharded gateways](/api-management/multiple-environments#gateway-sharding) +- Manage client IP address [allow](/api-management/gateway-config-tyk-classic#ip-access-control) and [block](/api-management/gateway-config-tyk-classic#ip-access-control) lists +- Attach [webhooks](/api-management/gateway-events#event-handling-with-webhooks) that will be triggered for different events + +### Uptime Tests + +The Tyk Classic Endpoint Designer - Uptime Tests tab + +In the **Uptime Tests** tab you can configure Tyk's [Uptime Test](/api-management/gateway-config-tyk-classic#uptime-tests) functionality + +### Debugging + +The Tyk Classic Endpoint Designer - Debugging tab + +The **Debugging** tab allows you to test your endpoints before you publish or update them. You can also use it for testing any middleware plugins you have implemented. Any debugging you create will persist while still in the current API, enabling you to make changes in the rest of the API settings without losing the debugging scenario. 
+ +The Debugging tab consists of the following sections: + +- Request +- Response +- Logs + +##### Request + +Debugging Request + +In this section, you can enter the following information: + +- Method - select the method for your test from the drop-down list +- Path - your endpoint to test +- Headers/Body - enter any header information, such as Authorization, etc. Enter any body information. For example, entering user information if creating/updating a user. + +Once you have entered all the requested information, click **Run**. Debugging Response and Log information will be displayed: + +##### Response + +Debugging Response + +The Response section shows the JSON response to your request. + +##### Logs + +Debugging Logs + +The debugging level is set to **debug** for the request. This outputs all logging information in the Endpoint Designer. In the Tyk Gateway logs you will see a single request. Any Error messages will be displayed at the bottom of the Logs output. + +## Traffic Analytics + +The Tyk Dashboard provides a full set of analytics functions and graphs that you can use to segment and view your API traffic and activity. The Dashboard offers a great way for you to debug your APIs and quickly pin down where errors might be cropping up and for which clients. + +[User Owned Analytics](/api-management/user-management#user-permissions), introduced in Tyk v5.1, can be used to limit the visibility of aggregate statistics to users when API Ownership is enabled. Due to the way that the analytics data are aggregated, not all statistics can be filtered by API and so may be inaccessible to users with the Owned Analytics permission. + + + +For the Tyk Dashboard's analytics functionality to work, you must configure both per-request and aggregated pumps for the database platform that you are using. For more details see the [Setup Dashboard Analytics](/api-management/tyk-pump#setup-dashboard-analytics) section. 
+ + + + +## Analyzing API Traffic Activity + +### API Activity Dashboard + +The first screen (and main view) of the Tyk Dashboard will show you an overview of the aggregate usage of your APIs, this view includes the number of hits, the number of errors and the average latency over time for all of your APIs as an average: + +API Activity Dashboard + +You can toggle the graphs by clicking the circular toggles above the graph to isolate only the stats you want to see. + +Use the Start and End dates to set the range of the graph, and the version drop-down to select the API and version you wish to see traffic for. + +You can change the granularity of the data by selecting the granularity drop down (in the above screenshot: it is set to β€œDay”). + +The filter by tag option, in a graph view, will enable you to see the graph filtered by any tags you add to the search. + +Below the aggregate graph, you’ll see an error breakdown and endpoint popularity chart. These charts will show you the overall error type (and code) for your APIs as an aggregate and the popularity of the endpoints that are being targeted by your clients: + +Error Breakdown and Endpoints + + + +From Tyk v5.1 (and LTS patches v4.0.14 and v5.0.3) the Error Breakdown and Endpoint Popularity charts will not be visible to a user if they are assigned the [Owned Analytics](/api-management/user-management#user-permissions) permission. + + + +### Activity Logs + +When you look through your Dashboard and your error breakdown statistics, you'll find that you will want to drill down to the root cause of the errors. This is what the Log Browser is for. + +The Log Browser will isolate individual log lines in your analytics data set and allow you to filter them by: + +* API Name +* Token ID (hashed) +* Errors Only +* By Status Code + +You will be presented with a list of requests, and their metadata: + +Log Viewer + +Click a request to view its details. 
+
+Log Viewer Details
+
+#### Self-Managed Installations Option
+
+In a Self-Managed installation, if you have request and response logging enabled, then you can also view the request payload and the response if it is available.
+To enable request and response logging, please take a look at [useful debug modes](/api-management/troubleshooting-debugging#capturing-detailed-logs).
+
+**A warning on detailed logging:** This mode generates a very large amount of data, and that data exponentially increases the size of your log data set, and may cause problems with delivering analytics in bulk to your MongoDB instances. This mode should only be used to debug your APIs for short periods of time.
+
+
+
+Detailed logging is not available for Tyk Cloud Classic customers.
+
+
+
+### Activity by API
+
+To get a tabular view of how your API traffic is performing, you can select the **Activity by API** option in the navigation and see a tabular view of your APIs. This table will list out your APIs by their traffic volume and you'll be able to see when they were last accessed:
+
+Activity per API
+
+You can use the same range selectors as with the Dashboard view to modify how you see the data. However, granularity and tag views will not work since they do not apply to a tabulated view.
+
+If you select an API name, you will be taken to the drill-down view for that specific API. Here you will have a similar Dashboard as you do with the aggregate API Dashboard that you first visit on log in, but the whole view will be constrained to just the single API in question:
+
+Traffic per API: Closed graph
+
+You will also see an error breakdown and the endpoint popularity stats for the API:
+
+API error breakdown pie chart
+
+Tyk will try to normalize endpoint metrics by identifying IDs and UUIDs in a URL string and replacing them with normalized tags, this can help make your analytics more useful. 
It is possible to configure custom tags in the configuration file of your Tyk Self-Managed or Multi-Cloud installation. + + + +From Tyk v5.1 (and LTS patches v4.0.14 and v5.0.3) the Error Breakdown and Endpoint Popularity charts will not be visible to a user if they are assigned the [Owned Analytics](/api-management/user-management#user-permissions) permission. + + + +### Activity by Key + +You will often want to see what individual keys are up to in Tyk, and you can do this with the **Activity per Key** section of your analytics Dashboard. This view will show a tabular layout of all keys that Tyk has seen in the range period and provide analytics for them: + +Activity per Token + +You'll notice in the screenshot above that the keys look completely different to the ones you can generate in the key designer (or via the API), this is because, by default, Tyk will hash all keys once they are created in order for them to not be snooped should your key-store be breached. + +This poses a problem though, and that is that the keys also no longer have any meaning as analytics entries. You'll notice in the screenshot above, one of the keys is appended by the text **TEST_ALIAS_KEY**. This is what we call an Alias, and you can add an alias to any key you generate and that information will be transposed into your analytics to make the information more human-readable. + +The key `00000000` is an empty token, or an open-request. If you have an API that is open, or a request generates an error before we can identify the API key, then it will be automatically assigned this nil value. + +If you select a key, you can get a drill down view of the activity of that key, and the errors and codes that the token has generated: + +Traffic activity by key graph + +Errors by Key + +(The filters in this view will not be of any use except to filter by API Version). 
+ + + +From Tyk v5.1 (and LTS patches v4.0.14 and v5.0.3) the Traffic per Key screen will not be visible to a user if they are assigned the [Owned Analytics](/api-management/user-management#user-permissions) permission. + + + +### Activity by endpoint + +To get a tabular view of how your API traffic is performing at the endpoint level, you can select the Activity by Endpoint option in the navigation and see a tabular view of your API endpoints. This table will list your API endpoints by their traffic volume and you’ll be able to see when they were last accessed: + +Activity by endpoint + +#### Controlling which endpoints appear in the analytics data + +The aggregate pumps have an option to `track_all_paths` which will ensure that all analytics records generated by the Tyk Gateway will be included in the aggregated statistics on the Endpoint Popularity screen. Set this to `true` to capture all endpoints in the aggregated data and subsequently on the Dashboard page. + +You can alternatively select only a subset of the endpoints to include in the aggregated data by setting `track_all_paths` to `false` and identifying specific endpoints to be "tracked". These are identified by the `TrackPath` [flag](/api-management/tyk-pump#trackpath) being set to `true` in the record. In this configuration, the Pump will only include transaction records from "tracked" endpoints in the aggregated data. + +Tyk Gateway will set `TrackPath` to `true` in transaction records generated for endpoints that have the track endpoint middleware enabled. + + + +The *track endpoint* middleware only affects the inclusion of endpoints in the per-endpoint aggregates, it does not have any impact on other [aggregated data](/api-management/logs-metrics#aggregated-analytics) nor the [per-request data](/api-management/dashboard-configuration#activity-logs). 
+ + + +#### Selecting Tyk OAS APIs endpoints to be tracked + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The track endpoint middleware (`trackEndpoint`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +The `trackEndpoint` object has the following configuration: + - `enabled`: enable the middleware for the endpoint + +For example: +```json {hl_lines=["39-41"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-track-endpoint", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-track-endpoint", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-track-endpoint/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "trackEndpoint": { + "enabled": true + } + } + } + } + } +} +``` + +In this example the track endpoint middleware has been configured for requests to the `GET /anything` endpoint. These requests will appear in the Endpoint Popularity analytics screen, located within the API Usage section of Tyk Dashboard. 
+ +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the track endpoint middleware. + +##### Configuring the middleware in the API Designer + +Adding the track endpoint middleware to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Track Endpoint middleware** + + Select **ADD MIDDLEWARE** and choose the **Track Endpoint** middleware from the *Add Middleware* screen. + + Adding the Track Endpoint middleware + +3. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +#### Selecting Tyk Classic API endpoints to be tracked +If you are working with Tyk Classic APIs then you must add a new `track_endpoints` object to the `extended_paths` section of your API definition. + +The `track_endpoints` object has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint HTTP method + +For example: +```.json {linenos=true, linenostart=1} +{ + "extended_paths": { + "track_endpoints": [ + { + "disabled": false, + "path": "/anything", + "method": "GET", + } + ] + } +} +``` + +In this example the track endpoint middleware has been configured for HTTP `GET` requests to the `/anything` endpoint. These requests will appear in the Endpoint Popularity analytics screen, located within the API Usage section of Tyk Dashboard. + +##### Configuring the middleware in the API Designer + +You can use the API Designer in the Tyk Dashboard to configure the track endpoint middleware for your Tyk Classic API by following these steps. + +1. 
**Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to allow access. Select the **Track endpoint** plugin. + + Select the middleware + +2. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware for the selected endpoint. + +### Activity by Location + +Tyk will attempt to record GeoIP based information based on your inbound traffic. This requires a MaxMind IP database to be available to Tyk and is limited to the accuracy of that database. + +You can view the overview of what the traffic breakdown looks like per country, and then drill down into the per-country traffic view by selecting a country code from the list: + +Geographic Distribution + + + +From Tyk v5.1 (and LTS patches v4.0.14 and v5.0.3) the Geographic Distribution screen will not be visible to a user if they are assigned the [Owned Analytics](/api-management/user-management#user-permissions) permission. + + + +**MaxMind Settings** + +To use a MaxMind database, see [MaxMind Database Settings](/tyk-oss-gateway/configuration#analytics_configenable_geo_ip) in the Tyk Gateway Configuration Options. + +### Activity by Error + +The error overview page limits the analytics down to errors only, and gives you a detailed look over the range of the number of errors that your APIs have generated. This view is very similar to the Dashboard, but will provide more detail on the error types: + +Error Overview + + + +From Tyk v5.1 (and LTS patches v4.0.14 and v5.0.3) the Errors by Category data will not be visible to a user if they are assigned the [Owned Analytics](/api-management/user-management#user-permissions) permission. + + + +### Activity by Oauth Client + +Traffic statistics are available on a per OAuth Client ID basis if you are using the OAuth mode for one of your APIs. 
To get a breakdown view of traffic aggregated to a Client ID, you will need to go to the **System Management -> APIs** section and then under the **OAuth API**, there will be a button called **OAuth Clients**. Selecting an OAuth client will then show its aggregate activity.
+
+OAuth Client
+
+In the API list view – an **OAuth Clients** button will appear for OAuth enabled APIs; use this to browse to the Client ID and the associated analytics for that client ID:
+
+OAuth Client Analytics Data
+
+You can view the analytics of individual tokens generated by this Client ID in the regular token view.
+
+
+
+From Tyk v5.1 (and LTS patches v4.0.14 and v5.0.3) the Traffic per OAuth Client ID charts will not be visible to a user if they are assigned the [Owned Analytics](/api-management/user-management#user-permissions) permission.
+
+
+
+---
+
+## Governance using API Categories
+
+API categorization is a governance feature provided within the Tyk Dashboard that helps you to manage a portfolio of APIs. You can filter the list of APIs visible in the Dashboard UI or to be returned by the Dashboard API by category. You can assign an API to any number of categories and any number of APIs to a category. All category names are entirely user defined.
+
+### When to use API categories
+#### Managing a large portfolio of APIs
+As a platform manager looking after a large portfolio of APIs, if I need to make changes to a sub-set of APIs, it's cumbersome having to identify which APIs they are and then to find them one-by-one in the list. If I have assigned categories to my APIs then I can filter quickly and easily to work with that sub-set. What's really powerful is that an API can appear in as many different categories as I like.
+
+#### Multi-tenant deployment
+Multi-tenant deployments with [role-based access control](/api-management/user-management) enabled allow an admin user to give different users or groups access to a sub-set of the entire API portfolio.
Categories can be aligned with the API ownership rules that you have deployed to allow filtering the list of APIs for those visible to each separate user group/team. + +### How API categories work +API categories with Tyk are a very simple concept - you can define any string as a category and then tag the relevant APIs with that string. + +Categories might refer to the API's general focus (e.g. 'weather' or 'share prices'); they might relate to geographic location (e.g. 'APAC' or 'EMEA'); they might refer to technical markers (e.g. 'dev', 'test'); or anything else you might need. It's completely up to you. + +Categories can be defined, added to and removed from APIs without limitation. + +#### Tyk OAS APIs +When a Tyk OAS API is assigned to a category, the category name (string) is appended to a list in the database object where the API definition is stored by Tyk Dashboard. No change is made to the API definition itself. + +#### Tyk Classic APIs +When a Tyk Classic API is assigned to a category, the category name (string) is appended to the `name` field in the API definition using a `#` qualifier. For example, let's say you have an API with this (partial) API definition: + +```json +{ + "name": "my-classic-api" +} +``` +You can add it to the `global` and `staging` categories by updating the API definition to: + +```json +{ + "name": "my-classic-api #global #staging" +} +``` +When a Tyk Classic API is migrated from one environment to another using Tyk Sync, it will retain any category labels that it has been assigned. + + + +The use of the `#` qualifier to identify a category prevents the use of `#` in your API names; this is not an issue when working with Tyk OAS APIs. + + + +### Using API categories +API categories can be added and removed from APIs within the [API Designer](#api-designer), via the [Tyk Dashboard API](#tyk-dashboard-api), or via [Tyk Operator](/api-management/automations/operator#what-is-tyk-operator). 
+ +#### API Designer +The API Designer in the Tyk Dashboard UI provides a simple method for assigning APIs to categories, removing categories and filtering the API list by category. + +##### Managing categories with Tyk OAS APIs +When working with Tyk OAS APIs, the API Designer has a separate **Access** tab where you can configure the categories to which the API is assigned. +Tyk OAS API Designer + +You can choose existing categories from the drop-down or define new categories simply by typing in the box. You can also remove the API from a category by clicking on the `x` or deleting the category from the box. +Managing categories for a Tyk OAS API + +##### Managing categories with Tyk Classic APIs +When working with Tyk Classic APIs, the API Designer has a box in the **API Settings** section where you can configure the categories to which the API is assigned. +Tyk Classic API Designer + +You can choose existing categories from the list that appears when you click in the box or you can define new categories simply by typing in the box. You can also remove the API from a category by clicking on the `x` or deleting the category from the box. +Managing categories for a Tyk Classic API + +##### Filtering the API list +When you have APIs assigned to categories, you can choose to view only the APIs in a specific category by using the **FILTER BY API CATEGORY** drop-down on the **Created APIs** screen. +View APIs in a category + +#### Tyk Dashboard API +The [Tyk Dashboard API](/tyk-dashboard-api) provides endpoints to manage categories directly, if you are not using the API Designer. 
+ +When working with Tyk OAS APIs, you can manage categories for an API using these endpoints: + +| Method | Endpoint path | Action | +| :-------- | :-------------------------------------- | :---------------------------------------------------------------------------------------- | +| `PUT` | `/api/apis/oas/{apiID}/categories` | Assign a list of categories to the specified API +| `GET` | `/api/apis/oas/{apiID}/categories` | Retrieve the list of categories assigned to the specified API | + +When working with Tyk Classic APIs, you manage categories for an API by modifying the `name` field in the API definition and then updating the API in Tyk with that using these endpoints: + +| Method | Endpoint | Action | +| :-------- | :-------------------------------------- | :---------------------------------------------------------------------------------------- | +| `PUT` | `/api/apis/{apiID}` | Update the API definition for the specified API - CRUD category tags in the `name` field | +| `GET` | `/api/apis/{apiID}` | Retrieve the API definition for the specified API - category tags in `name` field | + +These endpoints will return information for categories across all APIs in the system (both Tyk OAS and Tyk Classic): + +| Method | Endpoint path | Action | +| :-------- | :-------------------------------------- | :---------------------------------------------------------------------------------------- | +| `GET` | `/api/apis/categories` | Retrieve a list of all categories defined in the system and the number of APIs in each | +| `GET` | `/api/apis?category={category_name}` | Retrieve a list of all APIs assigned to the specified category | + +#### Tyk Operator + +You can manage categories using Tyk Operator custom resources. Please refer to [Tyk Operator](/api-management/automations/operator#api-categories) documentation to see how to manage API categories for Tyk OAS APIs and Tyk Classic APIs. 
+ +## Governance using API Templates + +API Templates are an API governance feature provided to streamline the process of creating Tyk OAS APIs. An API template is an asset managed by Tyk Dashboard that is used as the starting point - a blueprint - from which you can create a new Tyk OAS API definition. + +The default template is a blank API definition; your custom templates will contain some configuration, for example cache configuration or default endpoints with pre-configured middleware. When you create a new API using a custom template, whether importing an OpenAPI document or building the API from scratch in the Tyk API Designer, those elements of the API configuration included in the template will be pre-configured for you. + + + +API Templates are exclusive to [Tyk OAS APIs](/api-management/gateway-config-introduction#api-definitions) and can be managed via the Tyk Dashboard API or within the Tyk Dashboard UI. + + + +### When to use API templates +#### Gateway agnostic API design +When working with OpenAPI described upstream service APIs, your service developers do not need to learn about Tyk. You can create and maintain a suitable suite of templates that contain the Tyk-specific configuration (`x-tyk-api-gateway`) that you require for your externally published API portfolio. Creating an API on Tyk is as simple as importing the OpenAPI document and selecting the correct template. Tyk will combine the OpenAPI description with the template to produce a valid Tyk OAS API. + +#### Standardizing API configuration +If you have specific requirements for your external facing APIs - for example authentication, caching or even a healthcheck endpoint - you can define the appropriate API templates so that when APIs are created on Tyk these fields are automatically and correctly configured. 
+ +### How API templating works +An API template is a blueprint from which you can build new APIs - it is an incomplete JSON representation of a Tyk OAS API definition that you can use as the starting point when creating a new API on Tyk. There is no limit to how much or how little of the API definition is pre-configured in the template (such that when you choose to create a new API without choosing a template, the blank API definition that you start from is itself a template). + +Templates are used only during the creation of an API, they cannot be applied later. Before you can use a template as the basis for an API, you must register the template with Tyk Dashboard. + +#### Structure of an API template +An API template asset has the following structure: + - `id`: a unique string type identifier for the template + - `kind`: the asset type, which is set to `oas-template` + - `name`: human-readable name for the template + - `description`: a short description of the template, that could be used for example to indicate the configuration held within the template + - `data`: a Tyk OAS API definition, the content of which will be used for templating APIs + - `_id`: a unique identifier assigned by Tyk when the template is registered in the Dashboard database + +#### Creating an API from a template +When you use a template during the [creation](/api-management/gateway-config-managing-oas#creating-an-api) of an API, the fields configured in `data` will be pre-set in your new API. You are able to modify these during and after creation of the template. No link is created between the API and the template, so changes made to the API will not impact the template. 
+ +#### Merging with an OpenAPI description or Tyk OAS API definition +When you use a template during the creation of an API where you [import](/api-management/gateway-config-managing-oas#importing-an-openapi-description-to-create-an-api) the OpenAPI document or a full Tyk OAS API definition, the template is combined with the imported OAS description. If the `x-tyk-api-gateway` extension exists in the template, it will be applied to the newly created API. + +Where there are clashes between configuration in the OpenAPI description and the template: + - for maps, such as `paths` and `components`, new keys will be added alongside any existing ones from the template + - if a key in the OpenAPI description matches one in the template, the OpenAPI description takes precedence + - for array properties, such as `servers` and `tags`, values in the OpenAPI description will replace those in the template + +
+ +If you're using the API Designer in the Tyk Dashboard UI, then you can find details and examples of how to work with API templates [here](#working-with-api-templates-using-the-template-designer). + +If you're using the Tyk Dashboard API, then you can find details and examples of how to work with API templates [here](#working-with-api-templates-using-the-dashboard-api). + +### Working with API Templates using the Template Designer + +[API Templates](#governance-using-api-templates) are an API governance feature provided to streamline the process of creating Tyk OAS APIs. An API template is an asset managed by Tyk Dashboard that is used as the starting point - a blueprint - from which you can create a new Tyk OAS API definition. + +The Tyk Dashboard UI provides the following functionality to support working with API templates: + - Creating templates + - [new template](#creating-a-new-api-template) + - [from an existing API](#creating-a-template-from-an-existing-api) + - Using templates + - [when creating an API](#using-a-template-when-creating-a-new-api) + - [when importing an OpenAPI description or API definition](#using-a-template-when-importing-an-openapi-description-or-api-definition) + - [Managing templates](#managing-templates) + +API Templates can be found in the **API Templates** section of the **API Management** menu in the Tyk Dashboard. This screen lists all the templates currently registered with Tyk and displays their names and short descriptions. It also gives access to options to create and manage templates. + +API Templates + + + +API Templates are exclusive to [Tyk OAS APIs](/api-management/gateway-config-introduction#api-definitions). + + + +#### Creating templates +API templates can be created starting from a blank template or from an existing API + +##### Creating a new API template +To create a template, simply visit the **API Templates** section of the Tyk Dashboard and select **ADD TEMPLATE**. 
+ +This will take you to the **Create API Template** screen, where you can configure all aspects of the template. + +The template does not need to be a complete or valid API definition however as a minimum: + - you must give the template a **Name** + - you must give the template a **Description** + +In this example, we have configured just the Name, Description, Gateway Status and Access settings: + +Configure the template + +When you have configured all of the API-level and endpoint-level settings you require, select **SAVE TEMPLATE** to create and register the template with Tyk. + +Returning to the **API Template** screen you will see your new template has been added to the list and assigned a unique `id` that can be used to access the template from the [Tyk Dashboard API](#structure-of-an-api-template): + +Template has been successfully created + +##### Creating a template from an existing API +You can use an existing API deployed on Tyk as the basis for a new API template - this is a great way to build up a portfolio of standardized APIs once you've got your first one correctly configured. + +From the **Created APIs** screen within the **APIs** section of the Tyk Dashboard, select the API that you wish to use as your starting point. In the **ACTIONS** drop-down select the **CREATE API TEMPLATE** option. + +Select Create API Template + +This will take you to the **Create API Template** screen, where you can configure all aspects of the template. + +The template does not need to be a complete or valid API definition however as a minimum: + - you must give the template a **Name** + - you must give the template a **Description** + +In this example, we have configured the Name and Description. The base API included response header transformation middleware on the `/anything` endpoint and API-level cache configuration, all of which will be configured within the template. 
+ +Configure the template +Cache settings inherited from base API +Endpoint settings inherited from base API + +When you have configured all of the API-level and endpoint-level settings you require, select **SAVE TEMPLATE** to create and register the template with Tyk. + +Returning to the **API Template** screen you will see your new template has been added to the list and assigned a unique `id` that can be used to access the template from the [Tyk Dashboard API](#structure-of-an-api-template). + +Template has been successfully created + +#### Using templates +API templates are used as the starting point during the creation of a new API. They can be applied in all of the methods supported by Tyk for creating new APIs. + +##### Using a template when creating a new API +There are two ways to base a new API, created entirely within the Tyk Dashboard's API Designer, on a template that you've created and registered with Tyk. + +You can go from the **API Template** screen - for the template you want to use, select **CREATE API FROM TEMPLATE** from the **ACTIONS** menu: +Select Create API from template + +Or, from the **Created APIs** screen, select **ADD NEW API** as normal and then select the template you want to use from the **API Template** section: +Select the template you want to use + +Both of these routes will take you through to the API Designer, where the settings from your API template will be pre-configured. + +In this example, we applied "My first template" that we created [here](#creating-a-new-api-template). 
You can see that the Gateway Status and Access fields have been configured: +The API with template applied + +##### Using a template when importing an OpenAPI description or API definition +From the **Import API** screen, if you select the OpenAPI **type** then you can create an API from an OpenAPI description or Tyk OAS API definition; choose the appropriate method to provide this to the Dashboard: +- paste the JSON into the text editor +- provide a plain text file containing the JSON +- provide a URL to the JSON +Options when importing an OpenAPI description + +After pasting the JSON or locating the file, you can select the template you want to use from the **API Template** section: +Select the template you want to use + +In this example we used this simple OpenAPI description and selected "My second template" that we created [here](#creating-a-template-from-an-existing-api): + +``` json {linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "my-open-api-document", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://httpbin.org" + } + ], + "paths": { + "/xml": { + "get": { + "operationId": "xmlget", + "responses": { + "200": { + "description": "" + } + } + } + } + } +} +``` +The API that is created has both `/xml` and `/anything` endpoints defined, with API-level caching configured. You can see the API definition [here](https://gist.github.com/andyo-tyk/5d5cfeda404ce1ba498bbf4b9c105cf0). + +#### Managing templates +The Dashboard UI allows you to edit and delete templates after they have been created and registered with the Tyk Dashboard + +##### Editing a template +You can make changes to a template that has been registered with Tyk from the **API Templates** screen. For the template that you want to modify, simply select **EDIT TEMPLATE** from the **ACTIONS** menu: +Accessing the API template + +This will take you to the **API Template Details** screen where you can view the current template configuration. 
If you want to make changes, simply select **EDIT** to make the fields editable: +Modifying the API template + +Alternatively you can view and modify the raw JSON for the template by selecting **VIEW RAW TEMPLATE** from the **ACTIONS** menu: +Modifying the API template JSON + +You'll need to select **SAVE TEMPLATE** to apply your changes from either screen. + +##### Deleting a template +You can delete a template from your Tyk Dashboard from the **API Template Details** screen. This screen can be accessed by selecting the template from the **API Templates** screen (either by clicking on the template name, or selecting **EDIT TEMPLATE** from the **ACTIONS** menu): +Accessing the API template +Accessing the API template + +From the **API Template Details** screen you can select **DELETE TEMPLATE** from the **ACTIONS** menu: +Deleting the API template + + + +You will be asked to confirm the deletion, because this is irrevocable. Once confirmed, the template will be removed from the database and cannot be recovered. + + + +### Working with API Templates using the Dashboard API + +[API Templates](#governance-using-api-templates) are an API governance feature provided to streamline the process of creating Tyk OAS APIs. An API template is an asset managed by Tyk Dashboard that is used as the starting point - a blueprint - from which you can create a new Tyk OAS API definition. + +The Tyk Dashboard API provides the following functionality to support working with API templates: + - [registering a template with Tyk Dashboard](#registering-a-template-with-tyk-dashboard) + - [applying a template when creating an API from an OpenAPI document](#applying-a-template-when-creating-an-api-from-an-openapi-document) + - [applying a template when creating an API from a Tyk OAS API definition](#applying-a-template-when-creating-an-api-from-a-tyk-oas-api-definition) + + + + + API Templates are exclusive to [Tyk OAS APIs](/api-management/gateway-config-introduction#api-definitions). 
+ + + +#### Structure of an API template +An API template asset has the following structure: + - `id`: a unique string type identifier for the template + - `kind`: the asset type, which is set to `oas-template` + - `name`: human-readable name for the template + - `description`: a short description of the template, that could be used for example to indicate the configuration held within the template + - `data`: a Tyk OAS API definition, the content of which will be used for templating APIs + - `_id`: a unique identifier assigned by Tyk when the template is registered in the Dashboard database + +#### Registering a template with Tyk Dashboard +To register an API template with Tyk, you pass the asset in the body of a `POST` request to the dashboard's `/api/assets` endpoint. + +For example, if you send this command to the endpoint: +``` bash {linenos=true, linenostart=1} +curl --location 'http://localhost:3000/api/assets' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer d9957aff302b4f5e5596c86a685e63d8' \ +--data '{ + "kind": "oas-template", + "name": "my-template", + "description": "My first template", + "id": "my-unique-template-id", + "data": { + "info": { + "title": "", + "version": "" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "post": { + "operationId": "anythingpost", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "middleware": { + "global": { + "cache": { + "enabled": true, + "timeout": 5, + "cacheAllSafeRequests": true + } + }, + "operations": { + "anythingpost": { + "requestSizeLimit": { + "enabled": true, + "value": 100 + } + } + } + } + } + } +}' +``` + +Tyk will respond with `HTTP 201 Created` and will provide this payload in response: +``` json +{ + "Status": "success", + "Message": "asset created", + "Meta": "65e8c352cb71918520ff660c", + "ID": "my-unique-template-id" +} +``` + +Here `Meta` contains the database ID (where the asset has been registered in the 
persistent storage) and `ID` contains the unique identifier for the template. This unique identifier will be automatically generated by Tyk if none was provided in the `id` of the template asset provided in the `curl` request.
+
+#### Applying a template when creating an API from an OpenAPI document
+When creating an API on Tyk using an OpenAPI document describing your upstream service, you can use the `/apis/oas/import` endpoint to import the OpenAPI description and apply it to your API.
+
+If you have a template registered with your Dashboard, you can use this as the starting point for your new API. Tyk will combine the OpenAPI document with the template, automating the configuration of any element in the Tyk OAS API definition as defined in your chosen template.
+
+You'll need to identify the template to be used during the import. You can use either its unique `id` or the database ID that was assigned when the template was [registered with Tyk Dashboard](#registering-a-template-with-tyk-dashboard). You provide either the `id` or `_id` in the `templateID` query parameter in the call to `/apis/oas/import`.
+ +For example: +``` bash {linenos=true, linenostart=1} +curl --location 'http://localhost:3000/api/apis/oas/import?templateID=my-unique-template-id' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer ' \ +--data '{ + "components": {}, + "info": { + "title": "my-open-api-document", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://httpbin.org" + } + ], + "paths": { + "/xml": { + "get": { + "operationId": "xmlget", + "responses": { + "200": { + "description": "" + } + } + } + } + } +}' +``` +Tyk will respond with `HTTP 200 OK` and will provide this payload in response: +``` json +{ + "Status": "OK", + "Message": "API created", + "Meta": "65e8c4f4cb71918520ff660d", + "ID": "970560005b564c4755f1db51ca5660e6" +} +``` + +Here `Meta` contains the database ID (where the API has been registered in the persistent storage) and `ID` contains the unique identifier for the API. This unique identifier will be automatically generated by Tyk as none was provided in the `id` field of the `x-tyk-api-gateway.info` field provided in the `curl` request. 
+ +The new Tyk OAS API will have this definition, combining the OpenAPI description provided in the body of the `curl` request with the template with Id `my-unique-template-id`: +``` json {linenos=true, linenostart=1} +{ + "info": { + "title": "my-open-api-document", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://localhost:8181/" + }, + { + "url": "http://httpbin.org" + } + ], + "security": [], + "paths": { + "/anything": { + "post": { + "operationId": "anythingpost", + "responses": { + "200": { + "description": "" + } + } + } + }, + "/xml": { + "get": { + "operationId": "xmlget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "components": { + "securitySchemes": {} + }, + "x-tyk-api-gateway": { + "info": { + "dbId": "65e8c4f4cb71918520ff660d", + "id": "970560005b564c4755f1db51ca5660e6", + "orgId": "65d635966ec69461e0e7ee52", + "name": "my-open-api-document", + "state": { + "active": true, + "internal": false + } + }, + "middleware": { + "global": { + "cache": { + "cacheResponseCodes": [], + "cacheByHeaders": [], + "timeout": 5, + "cacheAllSafeRequests": true, + "enabled": true + } + }, + "operations": { + "anythingpost": { + "requestSizeLimit": { + "enabled": true, + "value": 100 + } + } + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/" + } + }, + "upstream": { + "url": "http://httpbin.org" + } + } +} +``` +Note that the `GET /xml` endpoint from the OpenAPI description and the `POST /anything` endpoint from the template (complete with `requestSizeLimit` middleware) have both been defined in the API definition. API-level caching has been enabled, as configured in the template. Tyk has included the `server` entry from the OpenAPI description (which points to the upstream server) and added the API URL on Tyk Gateway ([as explained here](/api-management/gateway-config-tyk-oas#modifying-the-openapi-description)). 
+ +#### Applying a template when creating an API from a Tyk OAS API definition +When creating an API using a complete Tyk OAS API definition (which includes `x-tyk-api-gateway`), you can use the `/apis/oas` endpoint to import the API defintiion. + +If you have a template registered with your Dashboard, you can use this as the starting point for your new API. Tyk will combine the API definition with the template, automating the configuration of any element defined in your chosen template. + +You'll need to identify the template to be used during the import. You can use either its unique `id` or the database ID that was assigned when the template was [registered with Tyk Dashboard](#registering-a-template-with-tyk-dashboard). You provide either the `id` or `_id` in the `templateID` query parameter in the call to `/apis/oas`. + +For example: +``` bash {linenos=true, linenostart=1} +curl --location 'http://localhost:3000/api/apis/oas?templateID=my-unique-template-id' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer ' \ +--data '{ + "components": {}, + "info": { + "title": "example-api", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/json": { + "get": { + "operationId": "jsonget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-api", + "state": { + "active": true, + "internal": false + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "strip": true, + "value": "/example-api/" + } + }, + "middleware": { + "operations": { + "jsonget": { + "transformResponseHeaders": { + "enabled": true, + "add": [ + { + "name": "X-Foo", + "value": "bar" + } + ] + } + } + } + } + } +}' +``` +Tyk will respond with `HTTP 200 OK` and will provide this payload in response: +``` json +{ + "Status": "OK", + "Message": "API created", + "Meta": "65e98ca5cb71918520ff6616", + "ID": "b8b693c5e28a49154659232ca615a7e8" +} +``` + +Here 
`Meta` contains the database ID (where the API has been registered in the persistent storage) and `ID` contains the unique identifier for the API. This unique identifier will be automatically generated by Tyk as none was provided in the `id` field of the `x-tyk-api-gateway.info` field provided in the `curl` request. + +The new Tyk OAS API will have this definition, combining the Tyk OAS API definition provided in the body of the `curl` request with the template with Id `my-unique-template-id`: + +``` json {linenos=true, linenostart=1} +{ + "info": { + "title": "example-api", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://localhost:8181/example-api/" + } + ], + "security": [], + "paths": { + "/anything": { + "post": { + "operationId": "anythingpost", + "responses": { + "200": { + "description": "" + } + } + } + }, + "/json": { + "get": { + "operationId": "jsonget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "components": { + "securitySchemes": {} + }, + "x-tyk-api-gateway": { + "info": { + "dbId": "65e98ca5cb71918520ff6616", + "id": "b8b693c5e28a49154659232ca615a7e8", + "orgId": "65d635966ec69461e0e7ee52", + "name": "example-api", + "state": { + "active": true, + "internal": false + } + }, + "middleware": { + "global": { + "cache": { + "cacheResponseCodes": [], + "cacheByHeaders": [], + "timeout": 5, + "cacheAllSafeRequests": true, + "enabled": true + } + }, + "operations": { + "anythingpost": { + "requestSizeLimit": { + "enabled": true, + "value": 100 + } + }, + "jsonget": { + "transformResponseHeaders": { + "enabled": true, + "add": [ + { + "name": "X-Foo", + "value": "bar" + } + ] + } + } + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/example-api/" + } + }, + "upstream": { + "url": "http://httpbin.org/" + } + } +} +``` +Note that the `GET /json` endpoint from the OpenAPI description and the `POST /anything` endpoint from the template (complete with `requestSizeLimit` 
middleware) have both been defined in the API definition. API-level caching has been enabled, as configured in the template.
+
+## Extend Permissions using Open Policy Agent (OPA)
+
+### Overview
+
+The Tyk Dashboard permission system can be extended by writing custom rules using an Open Policy Agent (OPA). The rules engine works on top of your Dashboard API, which means you can control not only access rules, but also behavior of all Dashboard APIs (except your public developer portal).
+
+To give you some inspiration, here are some ideas of the rules you can implement now:
+
+* Enforce HTTP proxy option for all APIs for which the target URL does not point at the internal domain
+* Control access for individual fields. For example, do not allow changing the API "active" status (e.g. deploy), unless you have a specific permission set (and make new permissions to be available to the Dashboard/API). Custom permissions can be created using the [Additional Permissions API](/api-management/dashboard-configuration#additional-permissions-api)
+* Have a user (or group) which has read access to one API and write access to another
+The OPA rule engine is put on top of the Dashboard API, which means you can control the behavior of all APIs (except the public developer portal)
+
+We have a video that demonstrates how our Open Policy Agent enables you to add custom permissions.
+
+
+
+#### Configuration
+
+By default the Dashboard OPA engine is turned off, and you need to explicitly enable it via your Dashboard `tyk_analytics.conf` file.
+You can then control OPA functionality on a global level via your `tyk_analytics.conf` file, or at an organization level using either the [OPA API](/api-management/dashboard-configuration#open-policy-agent-api) or the [Dashboard](#using-the-open-policy-agent-in-the-dashboard). 
+ +| Key | Type | Description | Example | +| :------------------------------------- | :--------------- | :------------------------------------------------------------------------------------------------------------------------ | :----------------------------- | +| security.open_policy.enabled | boolean | Toggle support for OPA | false | +| security.open_policy.debug | boolean | Enable debugging mode, prints a lot of information to the console | false | +| security.open_policy.enable_api | boolean | Enable access to the OPA API, even for users with Admin role | false | +| security.additional_permissions | string map | Add custom user/user_group permissions. You can use them in your rules, and they will be displayed in the Dashboard | `{"key": "human name"}` | + +#### Example + +```json +"basic-config-and-security/security": { + "open_policy": { + "enabled":true, + "debug": true, + "enable_api": true + }, + "additional_permissions": {} +} +``` + + +With the OPA turned on, the majority of the security rules will be dynamically evaluated based on these rules. + +Additionally, users can modify OPA rules, and define their own, through the [OPA API](/api-management/dashboard-configuration#additional-permissions-api). For Self-Managed installations you can access and modify the OPA rules from your Tyk installation directory from [schemas/dashboard.rego](/api-management/dashboard-configuration#dashboard-opa-rules). +Moreover, using these rules you can also modify request content. Our recommendation is to use those modifications in a development environment and remember to create a backup of the rego rules. + +#### Language intro +The Open Policy Agent (OPA, pronounced β€œoh-pa”) is an open source, general-purpose policy engine that unifies policy enforcement across the stack. OPA provides a high-level declarative language (Rego) that lets you specify policy as code and simple APIs to offload policy decision-making from your software. 
(source: https://www.openpolicyagent.org/docs/latest/)
+
+#### What is Rego?
+OPA policies are expressed in a high-level declarative language called Rego. Rego (pronounced "ray-go") is purpose-built for expressing policies over complex hierarchical data structures. For detailed information on Rego see the [Policy Language](https://www.openpolicyagent.org/docs/latest/policy-language) documentation.
+
+Rego was inspired by Datalog, which is a well-understood, decades-old query language. Rego extends Datalog to support structured document models such as JSON.
+
+Rego queries are assertions on data stored in OPA. These queries can be used to define policies that enumerate instances of data that violate the expected state of the system.
+
+
+#### Why use Rego?
+Use Rego for defining a policy that is easy to read and write.
+
+Rego focuses on providing powerful support for referencing nested documents and ensuring that queries are correct and unambiguous.
+
+Rego is declarative so policy authors can focus on what queries should return rather than how queries should be executed. These queries are simpler and more concise than the equivalent in an imperative language.
+
+Like other applications which support declarative query languages, OPA is able to optimize queries to improve performance.
+
+Rego supports a variety of statements and functions. You can even use things like HTTP calls to build policies that depend on third-party APIs.
+See more about the language itself [here](https://www.openpolicyagent.org/docs/latest/policy-language/).
+
+
+#### Tyk policy primitives
+The main building block which is required for controlling access is a "deny" rule, which should return a detailed error in case of a rejection. You can specify multiple deny rules, and they will all be evaluated. If none of the rules is matched, the user will be allowed to access the resource. 
+ +A simple deny rule with a static error message can look like: + +```javascript +deny["User is not active"] { + not input.user.active +} +``` + +You can also specify a dynamic error message: + +```javascript +# None of the permissions was matched based on path +deny[x] { + count(request_permission) == 0 + x := sprintf("Unknown action '%v'", [input.request.path]) +} +``` + +In addition, to `deny` rules, you can also modify the requests using `patch_request`. +You should respond with a JSON merge patch format https://tools.ietf.org/html/rfc7396 +For example: + +```javascript +# Example: Enforce http proxy configuration for an APIs with category #external. +patch_request[x] { + request_permission[_] == "apis" + request_intent == "write" + contains(input.request.body.api_definition.name, "#external") + + x := {"api_definition": {"proxy": {"transport": {"proxy_url": "http://company-proxy:8080"}}}} +} +``` + + +#### Getting Tyk Objects +In some cases, you may want to write a rule which is based on existing Tyk Object. +For example, you can write a rule for a policy API, which depends on the metadata of the API inside it. +The policy engine has access to the `TykAPIGet` function, which essentially just does a GET call to the Tyk Dashboard API. + +Example: + +```javascript +api := TykAPIGet("/apis/api/12345") +contains(api.target_url, "external.com") +``` + +Getting changeset of current request +For requests which modify the content, you can get a changeset (e.g. difference) using the `TykDiff` function, combined with a `TykAPIGet` call to get the original object. 
+ +Example: + +```javascript +# Example of the complex rule which forbids user to change API status, if he has some custom permission +deny["You are not allowed to change API status"] { + input.user.user_permissions["test_disable_deploy"] + + # Intent is to to update API + request_permission[_] == "apis" + request_intent == "write" + + # Lets get original API object, before update + # TykAPIGet accepts API url as argument, e.g. to receive API object call: TykAPIGet("/api/apis/") + api := TykAPIGet(input.request.path) + + # TykDiff performs Object diff and returns JSON Merge Patch document https://tools.ietf.org/html/rfc7396 + # For example if only state has changed diff may look like: {"api_definition":{"state": "active"}} + diff := TykDiff(api, input.request.body) + + # API state has changed + not is_null(diff.api_definition.active) +} +``` + +#### Developer guide +Since Opa rules are declarative, to test them in the majority of the cases, you can test your rules without using the Tyk Dashboard, and using this Rego [playground](https://play.openpolicyagent.org). +When it comes to the `TykAPIGet` and `TykDiff` functions, you can mock them in your tests. + +In order to understand how the Dashboard evaluates the rules, you can enable debugging mode by setting the `security.open_policy.debug` option, and in the Dashboard logs, you will see the detailed output with input and output of the rule engine. It can be useful to copy-paste the Dashboard log output to the Rego playground, fix the issue, and validate it on the Dashboard. + +When you modify the `dashboard.opa` file, you will need to restart your tyk Dashboard. + +#### Using the Open Policy Agent in the Dashboard + +As well as configuring OPA rules through the API, admin users can view and edit OPA rules from within the Tyk Dashboard. The advantage of configuring your OPA rules in the Dashboard is that you can use a code editor for it, emulating a proper developer experience. 
There are two ways you can do this: + +1. From the **OPA Rules menu**. From the Dashboard Management menu, select OPA Rules. You can view and make any changes and select whether your OPA rules should be enabled or disabled. + +OPA Rules Menu + +2. From **Developer Tools**. Using the keyboard shortcut `CMD+SHIFT+D` (or `CTRL+SHIFT+D` for PC), you can open the Developer Tools panel on any page in the Dashboard and configure the permissions. Updates are applied in real-time. + + + + + OPA rules can only be accessed by admin role users in the Dashboard. + + + +OPA Floating UI +OPA screen + +### Dashboard OPA rules + + + +### Configuring Open Policy Agent Rules + +This is an end-to-end worked example showing how to configure Open Policy Agent rules with some [additional permissions](/api-management/dashboard-configuration#additional-permissions-api). + +#### Use Case + +Tyk's [RBAC](/api-management/user-management) includes out of the box permissions to Write, Read, and Deny access to API Definitions, but what if we want to distinguish between those users who can create APIs and those users who can edit or update APIs? Essentially, we want to extend Tyk's out of the box RBAC to include more fine grained permissions that prevent an `API Editor` role from creating new APIs, but allow them to edit or update existing APIs. + +#### High Level Steps + +The high level steps to realize this use case are as follows: + +1. Create additional permissions using API +2. Create user +3. Add Open Policy Agent Rule +4. Test new rule + + +#### Create additional permissions + +To include the `API Editor` role with additional permissions, send a PUT Request to the [Dashboard Additional Permissions API endpoint](/api-management/dashboard-configuration#additional-permissions-api) `/api/org/permissions` + +**Sample Request** + +In order to add the new role/permissions use the following payload. 
+ +```console +PUT /api/org/permissions HTTP/1.1 +Host: localhost:3000 +authorization:7a7b140f-2480-4d5a-4e78-24049e3ba7f8 + +{ + "additional_permissions": { + "api_editor": "API Editor" + } +} +``` + +**Sample Response** + +```json +{ + "Status": "OK", + "Message": "Additional Permissions updated in org level", + "Meta": null +} +``` + +
+ + +Remember to set the `authorization` header to your Tyk Dashboard API Access Credentials secret, obtained from your user profile on the Dashboard UI. + +This assumes no other additional permissions already exist. If you're adding to existing permissions you'll want to send a GET to `/api/org/permissions` first, and then add the new permission to the existing list. + + + +#### Create user +In the Dashboard UI, navigate to System Management -> Users, and hit the `Add User` button. Create a user that has API `Write` access and the newly created `API Editor` permission, e.g. + +User with Additional Permission + +##### Add Open Policy Agent (OPA) Rule + +In the Dashboard UI, navigate to Dashboard Management -> OPA Rules + +Edit the rules to add the following: + +``` +request_intent = "create" { input.request.method == "POST" } +request_intent = "update" { input.request.method == "PUT" } + + +# Editor and Creator intent +intent_match("create", "write") +intent_match("update", "write") + + +# API Editors not allowed to create APIs +deny[x] { + input.user.user_permissions["api_editor"] + request_permission[_] == "apis" + request_intent == "create" + x := "API Editors not allowed to create APIs." +} +``` + +Updated Default OPA Rules incorporating the above rules as follows: + +```bash +# Default OPA rules +package dashboard_users +default request_intent = "write" +request_intent = "read" { input.request.method == "GET" } +request_intent = "read" { input.request.method == "HEAD" } +request_intent = "delete" { input.request.method == "DELETE" } +request_intent = "create" { input.request.method == "POST" } +request_intent = "update" { input.request.method == "PUT" } +# Set of rules to define which permission is required for a given request intent. 
+# read intent requires, at a minimum, the "read" permission +intent_match("read", "read") +intent_match("read", "write") +intent_match("read", "admin") +# write intent requires either the "write" or "admin" permission +intent_match("write", "write") +intent_match("write", "admin") +# delete intent requires either the "write" or "admin permission +intent_match("delete", "write") +intent_match("delete", "admin") +# Editor and Creator intent +intent_match("create", "write") +intent_match("update", "write") +# Helper to check if the user has "admin" permissions +default is_admin = false +is_admin { + input.user.user_permissions["IsAdmin"] == "admin" +} +# Check if the request path matches any of the known permissions. +# input.permissions is an object passed from the Tyk Dashboard containing mapping between user permissions (β€œread”, β€œwrite” and β€œdeny”) and the endpoint associated with the permission. +# (eg. If β€œdeny” is the permission for Analytics, it means the user would be denied the ability to make a request to β€˜/api/usage’.) +# +# Example object: +# "permissions": [ +# { +# "permission": "analytics", +# "rx": "\\/api\\/usage" +# }, +# { +# "permission": "analytics", +# "rx": "\\/api\\/uptime" +# } +# .... +# ] +# +# The input.permissions object can be extended with additional permissions (eg. you could create a permission called β€˜Monitoring’ which gives β€œread” access to the analytics API β€˜/analytics’). +# This is can be achieved inside this script using the array.concat function. +request_permission[role] { + perm := input.permissions[_] + regex.match(perm.rx, input.request.path) + role := perm.permission +} +# --------- Start "deny" rules ----------- +# A deny object contains a detailed reason behind the denial. +default allow = false +allow { count(deny) == 0 } +deny["User is not active"] { + not input.user.active +} +# If a request to an endpoint does not match any defined permissions, the request will be denied. 
+deny[x] { + count(request_permission) == 0 + x := sprintf("This action is unknown. You do not have permission to access '%v'.", [input.request.path]) +} +deny[x] { + perm := request_permission[_] + perm != "ResetPassword" + not is_admin + not input.user.user_permissions[perm] + x := sprintf("You do not have permission to access '%v'.", [input.request.path]) +} +# Deny requests for non-admins if the intent does not match or does not exist. +deny[x] { + perm := request_permission[_] + not is_admin + not intent_match(request_intent, input.user.user_permissions[perm]) + x := sprintf("You do not have permission to carry out '%v' operation.", [request_intent, input.request.path]) +} +# If the "deny" rule is found, deny the operation for admins +deny[x] { + perm := request_permission[_] + is_admin + input.user.user_permissions[perm] == "deny" + x := sprintf("You do not have permission to carry out '%v' operation.", [request_intent, input.request.path]) +} +# Do not allow users (excluding admin users) to reset the password of another user. +deny[x] { + request_permission[_] = "ResetPassword" + not is_admin + user_id := split(input.request.path, "/")[3] + user_id != input.user.id + x := sprintf("You do not have permission to reset the password for other users.", [user_id]) +} +# Do not allow admin users to reset passwords if it is not allowed in the global config +deny[x] { + request_permission[_] == "ResetPassword" + is_admin + not input.config.security.allow_admin_reset_password + not input.user.user_permissions["ResetPassword"] + x := "You do not have permission to reset the password for other users. As an admin user, this permission can be modified using OPA rules." +} +# API Editors not allowed to create APIs +deny[x] { + input.user.user_permissions["api_editor"] + request_permission[_] == "apis" + request_intent == "create" + x := "API Editors not allowed to create APIs." 
+} +# --------- End "deny" rules ---------- +################################################################################################################## +# Demo Section: Examples of rule capabilities. # +# The rules below are not executed until additional permissions have been assigned to the user or user group. # +################################################################################################################## +# If you are testing using OPA playground, you can mock Tyk functions like this: +# +# TykAPIGet(path) = {} +# TykDiff(o1,o2) = {} +# +# You can use this pre-built playground: https://play.openpolicyagent.org/p/T1Rcz5Ugnb +# Example: Deny users the ability to change the API status with an additional permission. +# Note: This rule will not be executed unless the additional permission is set. +deny["You do not have permission to change the API status."] { + # Checks the additional user permission enabled with tyk_analytics config: `"additional_permissions":["test_disable_deploy"]` + input.user.user_permissions["test_disable_deploy"] + # Checks the request intent is to update the API + request_permission[_] == "apis" + request_intent == "write" + # Checks if the user is attempting to update the field for API status. + # TykAPIGet accepts API URL as an argument, e.g. to receive API object call: TykAPIGet("/api/apis/") + api := TykAPIGet(input.request.path) + # TykDiff performs Object diff and returns JSON Merge Patch document https://tools.ietf.org/html/rfc7396 + # eg. If only the state has changed, the diff may look like: {"active": true} + diff := TykDiff(api, input.request.body) + # Checks if API state has changed. + not is_null(diff.api_definition.active) +} +# Using the patch_request helper you can modify the content of the request +# You should respond with JSON merge patch. 
+# See https://tools.ietf.org/html/rfc7396 for more details +# +# Example: Modify data under a certain condition by enforcing http proxy configuration for all APIs with the #external category. +patch_request[x] { + # Enforce only for users with ["test_patch_request"] permissions. + # Remove the ["test_patch_request"] permission to enforce the proxy configuration for all users instead of those with the permission. + input.user.user_permissions["test_patch_request"] + request_permission[_] == "apis" + request_intent == "write" + contains(input.request.body.api_definition.name, "#external") + x := {"api_definition": {"proxy": {"transport": {"proxy_url": "http://company-proxy:8080"}}}} +} +# You can create additional permissions for not only individual users, but also user groups in your rules. +deny["Only '%v' group has permission to access this API"] { + # Checks for the additional user permission enabled with tyk_analytics config: '"additional_permissions":["test_admin_usergroup"] + input.user.user_permissions["test_admin_usergroup"] + # Checks that the request intent is to access the API. + request_permission[_] == "apis" + api := TykAPIGet(input.request.path) + # Checks that the API being accessed has the category #admin-teamA + contains(input.request.body.api_definition.name, "#admin-teamA") + # Checks for the user group name. + not input.user.group_name == "TeamA-Admin" +} +``` + +#### Test +Login to the Dashboard UI as the new `API Editor` user and try to create a new API. You should see an `Access Denied` error message. Now try to update an existing API. This should be successful!! + + +## System Administration + +The Tyk Dashboard Admin API provides the following administrator level functions: + - Managing [organizations](#organizations). + - Creating initial [users](/api-management/dashboard-configuration#users-api-1) during boot-strapping of the system. + - Forcing a [URL reload](/api-management/dashboard-configuration#url-reload-api). 
+ - [Exporting](#export-assets-api) and [importing](#import-assets-api) Tyk assets (orgs, APIs, policies) for backup or when migrating between environments. + - Setting up [SSO integration](#single-sign-on-api-1). + +### Organizations + +Many businesses have a complex structure, for example a lot of distinct departments where each department has its own teams. You might also need to deploy and manage multiple environments such as Production, Staging and QA for different stages in your product workflow. The Tyk Dashboard is multi-tenant capable which allows you to use a single Tyk Dashboard to host separate *organizations* for each team or environment. + +An Organization is a completely isolated unit, and has its own: + - API Definitions + - API Keys + - Users + - Developers + - Domain + - Tyk Classic Portal + +When bootstrapping your Dashboard, the first thing the bootstrap script does is to create a new default Organization. + +Additional organizations can be created and managed using the [Dashboard Admin API](#organizations-api). + +#### Tyk Gateway and organizations +The concept of an organization does not exist within the Tyk Gateway. Gateways only proxy and validate the rules imposed on them by the definitions and keys that are being processed, however at their core there are some security checks within the Gateway that ensure organizational ownership of objects. + +Tyk allows each organization to own its own set of Gateways, for example when you want to use different hosting providers you can segregate them in terms of resources, or just for security reasons. + +Self-Managed users should use [API tagging](/api-management/multiple-environments#api-tagging-with-on-premises) and enforce a tagging standard across all organizations. + +All actions in a Self-Managed installation of Tyk must use a base Organization, and all actions should stem from a User owned by that organization. 
+ + + +A user that does not belong to an Organization is sometimes referred to as an *unbounded user*. These users have visibility across all Organizations, but should be granted read-only access. + + + +### Dashboard Audit Logs + +The audit log system captures detailed records of all requests made to endpoints under the `/api` route. These audit logs can be stored either in files (in JSON or text format) or in the database, providing flexible options for log management and retrieval. + +Subsequently, if hosting Tyk Dashboard within a Kubernetes cluster, please ensure that the configured log file path is valid and writeable. + +The Tyk Dashboard config section contains an audit section for configuring audit logging behavior. An example is listed below. + +```yaml + ... + "audit": { + "enabled": true, + "format": "json", + "path": "/tmp/audit.log", + "detailed_recording": false + }, + ... +``` + +#### Configuration Parameters + +| Parameter | Description | Default | +| :---- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------- | +| enabled | Enable audit logging. Setting `security.audit_log_path` also enables audit logging | true | +| format | Specifies audit log file format. Valid values are `json` and `text` | `text` | +| path | Path to the audit log. Overwrites `security.audit_log_path` if it was set | | +| detailed_recording | Enable detailed records in the audit log. If set to `true` then audit log records will contain the http-request (without body) and full http-response including the body | `false` | +| store_type | Specifies the storage in which audit logs will be written, valid values are `file` and `db`. | `file` | + +Please consult [Tyk Dashboard Configuration Options](/tyk-dashboard/configuration#audit) for equivalent configuration with environment variables. 
+ +#### JSON File Format + +Audit records the following fields for `json` format: + +| Field | Description | +| :---- | :---- | +| req_id | Unique request ID | +| org_id | Organization ID | +| date | Date in *RFC1123* format | +| timestamp | UNIX timestamp | +| ip | IP address the request originated from | +| user | Dashboard user who performed the request | +| action | Description of the action performed (e.g. Update User) | +| method | HTTP request method | +| url | URL of the request | +| status | HTTP response status of the request | +| diff | Provides a diff of changed fields (available only for PUT requests) | +| request_dump | HTTP request copy (available if `detailed_recording` is set to `true`) | +| response_dump | HTTP response copy (available if `detailed_recording` is set to `true`) | + +#### Text File Format + +The `text` format outputs all fields as plain text separated with a new line and provided in the same order as `json` format. + +#### Database Storage Support + +In addition to file storage, audit logs can be stored in the main database (MongoDB or Postgres), this feature has been available since Tyk 5.7.0. To enable database storage set `audit.store_type` to `db`: + +```yaml +... + "audit": { + "enabled": true, + "store_type": "db", + "detailed_recording": false + } +... +``` + +When `store_type` is set to `db`, audit logs will be stored in the main database storage instead of a file. + +#### Retrieving Audit Logs via API + +Since Tyk 5.7.0 a new API endpoint has been added to allow authorized users to retrieve audit logs from the database storage. To know more about the API specifications, check out the swagger [documentation](/tyk-dashboard-api). +To access the audit logs through the API ensure that your user account or group has been granted the "Audit Logs" RBAC group. If you do not have the necessary permissions, please contact your system administrator. 
+
+## Supported Database
+
+Tyk Dashboard requires a persistent datastore for its operations. By default MongoDB is used. From Tyk v4.0, we also support PostgreSQL.
+
+### MongoDB Supported Versions
+
+
+
+#### Configuring MongoDB
+
+Please check [here](/planning-for-production/database-settings#mongodb) for MongoDB driver and production configurations.
+
+### PostgreSQL Supported Versions
+
+
+
+
+
+SQLite support will be deprecated from Tyk 5.7.0. To avoid disruption, please transition to PostgreSQL, MongoDB or one of the listed compatible alternatives.
+
+
+
+#### Configuring PostgreSQL
+
+Please check [here](#configuring-postgresql) for production configurations.
+
+See the following pages for configuring your SQL installation with Tyk:
+
+* [Configuring Tyk Dashboard](#configuring-postgresql)
+* [Configuring Tyk Pumps](#configuring-postgresql)
+
+All data stored in SQL platforms will be identical to our existing MongoDB support.
+
+### Which platform should you use?
+
+
+
+Tyk no longer supports SQLite as of Tyk 5.7.0. To avoid disruption, please transition to [PostgreSQL](/planning-for-production/database-settings#postgresql), [MongoDB](/planning-for-production/database-settings#mongodb), or one of the listed compatible alternatives.
+
+
+
+We recommend the following:
+
+* For PoC installations, you can use PostgreSQL or MongoDB.
+* For production installations, we **only** support MongoDB or PostgreSQL
+
+## Data Storage Solutions
+
+Tyk stores a variety of data in 4 separate data storage layers. You can configure each layer separately to use one of our supported database platforms. Alternatively a single platform can be used for all layers. The 4 data storage layers are as follows:
+1. **Main**: Stores configurations of: APIs, Policies, Users and User Groups.
+2. **Aggregate Analytics**: Data used to display Dashboard charts and [analytics](#traffic-analytics).
+3. 
**Logs**: When [detailed logging](/api-management/troubleshooting-debugging#capturing-detailed-logs) is enabled, request and response data is logged to storage. These logs can be previewed in the Dashboard [log browser](#activity-logs).
+4. **Uptime**: Uptime test analytics.
+
+Being extensible, Tyk supports storing this data across different databases (MongoDB, MySQL and PostgreSQL etc.). For example, Tyk can be configured to store analytics in PostgreSQL, logs in MongoDB and uptime data in MySQL.
+
+As illustrated below it can be seen that Tyk Pump writes to one or more external data sources via a Redis store. Conversely, Tyk Dashboard reads this data from the external data sources.
+
+Tyk Dashboard Pump Architecture
+
+The following details are required to manage this configuration:
+- Data storage layer type
+- Database engine
+- Database connection string
+
+The remainder of this document explains how to configure Tyk Dashboard and Tyk Pump to read and write from one or more data storage layers, respectively.
+
+### Configure Dashboard to Read from a Data Storage Layer
+
+Tyk Dashboard has configuration environment variables for each data storage layer in the following format:
+
+```console
+TYK_DB_STORAGE_<LAYER>_TYPE
+TYK_DB_STORAGE_<LAYER>_CONNECTIONSTRING
+```
+
+where *LAYER* can be *MAIN*, *ANALYTICS*, *LOGS* or *UPTIME*. 
+
+For example, to configure Tyk Dashboard to read logs from a mongo database, the following environment variables are required:
+
+```console
+TYK_DB_STORAGE_LOGS_TYPE=mongo
+TYK_DB_STORAGE_LOGS_CONNECTIONSTRING=mongodb://db_host_name:27017/tyk_analytics
+```
+
+The full set of environment variables is listed below:
+
+```console
+TYK_DB_STORAGE_MAIN_TYPE
+TYK_DB_STORAGE_MAIN_CONNECTIONSTRING
+TYK_DB_STORAGE_LOGS_TYPE
+TYK_DB_STORAGE_LOGS_CONNECTIONSTRING
+TYK_DB_STORAGE_ANALYTICS_TYPE
+TYK_DB_STORAGE_ANALYTICS_CONNECTIONSTRING
+TYK_DB_STORAGE_UPTIME_TYPE
+TYK_DB_STORAGE_UPTIME_CONNECTIONSTRING
+```
+
+It should be noted that Tyk will attempt to use the configuration for the *main* data storage layer when no corresponding configuration is available for logs, uptime or analytics.
+
+Please refer to the [storage configuration](/tyk-dashboard/configuration#storage) section to explore the parameters for configuring Tyk Dashboard to read from different storage layers.
+
+
+### Configure Pump to Write to Data Storage Layers
+
+Please consult the Pump configuration [guide](/api-management/tyk-pump#sql-uptime-pump) for an explanation of how to configure Tyk Pump to write to different storage layers.
+
+The remainder of this section explains the *environment variables* that can be used to configure Tyk Pump to write to the following data storage layers:
+- Uptime
+- Aggregated Analytics
+- Logs
+
+#### Write Uptime Data
+
+Tyk Pump can be configured to write uptime data to SQL (Postgres and SQLite) and Mongo. The default behavior is to write to Mongo.
+
+##### PostgreSQL Database
+
+Tyk Pump can be configured to write to a PostgreSQL database, using the following environment variables:
+
+- *TYK_PMP_UPTIMEPUMPCONFIG_UPTIMETYPE*: Set to *sql* to configure Pump to store uptime data in a SQL based database.
+- *TYK_PMP_UPTIMEPUMPCONFIG_TYPE*: Set to *postgres* to configure Pump to use a PostgreSQL database for uptime data. 
+- *TYK_PMP_UPTIMEPUMPCONFIG_CONNECTIONSTRING*: Set the connection string for the PostgreSQL database.
+
+An example configuration is shown below:
+
+```console
+TYK_PMP_UPTIMEPUMPCONFIG_UPTIMETYPE=sql
+TYK_PMP_UPTIMEPUMPCONFIG_TYPE=postgres
+TYK_PMP_UPTIMEPUMPCONFIG_CONNECTIONSTRING=user=postgres password=topsecretpassword host=tyk-postgres port=5432 database=tyk_analytics
+```
+
+Further details for configuring an uptime SQL database are available [here](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#uptime_pump_configuptime_type)
+
+##### Mongo Database
+
+Tyk Pump can be configured to write to a Mongo database, using the following environment variables:
+
+- *TYK_PMP_UPTIMEPUMPCONFIG_UPTIMETYPE*: Set to *mongo* to configure Pump to store uptime data in a Mongo database.
+- *TYK_PMP_UPTIMEPUMPCONFIG_MONGOURL*: Set to the Mongo database connection string.
+- *TYK_PMP_UPTIMEPUMPCONFIG_COLLECTIONNAME*: Set to the name of the collection used to store uptime analytics.
+
+```console
+TYK_PMP_UPTIMEPUMPCONFIG_UPTIMETYPE=mongo
+TYK_PMP_UPTIMEPUMPCONFIG_MONGOURL=mongodb://db_host_name:27017/tyk_uptime_db
+TYK_PMP_UPTIMEPUMPCONFIG_COLLECTIONNAME=uptime_analytics
+```
+
+Further details for configuring a Tyk Mongo Pump are available [here](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#uptime_pump_config)
+
+#### Write Logs Data
+
+Tyk Pump can be configured to write logs to Mongo or SQL based databases.
+
+##### Mongo Database
+
+Tyk Pump can be configured to write to a Mongo database by setting the following environment variables:
+
+- *TYK_PMP_PUMPS_LOGS_TYPE*: Set to *mongo* to configure Pump to store logs in a Mongo database.
+- *TYK_PMP_PUMPS_LOGS_META_MONGOURL*: Set the connection string for the Mongo database.
+- *TYK_PMP_PUMPS_LOGS_META_COLLECTIONNAME*: Set the name of the collection that will store logs in the Mongo database. 
+ +An example is listed below: + +```console +TYK_PMP_PUMPS_LOGS_TYPE=mongo +TYK_PMP_PUMPS_LOGS_META_MONGOURL=mongodb://tyk-mongo:27017/tyk_analytics +TYK_PMP_PUMPS_LOGS_META_COLLECTIONNAME=tyk_logs +``` + +##### PostgreSQL Database + +Tyk Pump can be configured to write to a PostgreSQL database by setting the following environment variables: + +- *TYK_PMP_PUMPS_LOGS_TYPE*: Set to *SQL* to configure Pump to store logs in a SQL based database. +- *TYK_PMP_PUMPS_LOGS_META_TYPE*: Set to *postgres* to configure Pump to store logs in a PostgreSQL database. +- *TYK_PMP_PUMPS_LOGS_META_CONNECTIONSTRING*: Set the name of the connection string for the PostgreSQL database. + +```console +TYK_PMP_PUMPS_LOGS_TYPE=SQL +TYK_PMP_PUMPS_LOGS_META_TYPE=postgres +TYK_PMP_PUMPS_LOGS_META_CONNECTIONSTRING=user=postgres password=topsecretpassword host=tyk-postgres port=5432 database=tyk_analytics +``` + +##### MySQL Database + +Tyk Pump can be configured to write to a MySQL database by setting the following environment variables: + +- *TYK_PMP_PUMPS_LOGS_TYPE*: Set to *SQL* to configure Pump to store logs in a SQL based database. +- *TYK_PMP_PUMPS_LOGS_META_TYPE*: Set to *mysql* to configure Pump to store logs in a MySQL database. +- *TYK_PMP_PUMPS_LOGS_META_CONNECTIONSTRING*: Set the name of the connection string for the MySQL database. + +```console +TYK_PMP_PUMPS_LOGS_TYPE=SQL +TYK_PMP_PUMPS_LOGS_META_TYPE=mysql +TYK_PMP_PUMPS_LOGS_META_CONNECTIONSTRING=mysql://db_host_name:3306/tyk_logs_db +``` + +#### Write Aggregated Analytics Data + +Aggregated analytics corresponds to data that is used for the display of charts and graphs in [dashboard](#traffic-analytics). Tyk Pump can be configured to write aggregated analytics data to SQL based databases or MongoDB. + +##### SQL Database + + + +Tyk no longer supports SQLite as of Tyk 5.7.0. 
To avoid disruption, please transition to [PostgreSQL](/planning-for-production/database-settings#postgresql), [MongoDB](/planning-for-production/database-settings#mongodb), or one of the listed compatible alternatives.
+
+
+
+Storage of aggregated analytics data has been tested with PostgreSQL and SQLite databases. The following environment variables can be used to manage this configuration:
+
+- *TYK_PMP_PUMPS_SQLAGGREGATE_TYPE*: Set to *sql_aggregate* to configure Pump to store aggregated analytics data for charts and graphs in dashboard to a SQL based database.
+- *TYK_PMP_PUMPS_SQLAGGREGATE_META_TYPE*: The database engine used to store aggregate analytics. Tested values are *postgres* or *sqlite*.
+- *TYK_PMP_PUMPS_SQLAGGREGATE_META_CONNECTIONSTRING*: The connection string for the database that will store the aggregated analytics.
+
+The example below demonstrates how to configure Tyk Pump to write aggregated analytics data to a PostgreSQL database:
+
+```console
+TYK_PMP_PUMPS_SQLAGGREGATE_TYPE=sql_aggregate
+TYK_PMP_PUMPS_SQLAGGREGATE_META_TYPE=postgres
+TYK_PMP_PUMPS_SQLAGGREGATE_META_CONNECTIONSTRING=user=postgres password=topsecretpassword host=tyk-postgres port=5432 database=tyk_aggregated_analytics
+```
+
+##### Mongo Database
+
+Tyk Pump can be configured to write aggregated analytics data to MongoDB. Aggregated analytics are written to a collection named `z_tyk_analyticz_aggregate_{ORG_ID}`, where *ORG_ID* corresponds to the ID of your organization assigned by Tyk.
+
+The following environment variables can be used as a minimum to manage this configuration:
+
+- *TYK_PMP_PUMPS_MONGOAGGREGATE_TYPE*: Set to *mongo-pump-aggregate* to configure Pump to store aggregated analytics data in a MongoDB database.
+- *TYK_PMP_PUMPS_MONGOAGGREGATE_META_MONGOURL*: Mongo database connection URL. 
+ +An example is given below: + +```console +- TYK_PMP_PUMPS_MONGOAGGREGATE_TYPE=mongo-pump-aggregate +- TYK_PMP_PUMPS_MONGOAGGREGATE_META_MONGOURL=mongodb://db_host_name:27017/tyk_aggregated_analytics_db +``` diff --git a/api-management/data-graph.mdx b/api-management/data-graph.mdx new file mode 100644 index 000000000..bc4a1c6b5 --- /dev/null +++ b/api-management/data-graph.mdx @@ -0,0 +1,1883 @@ +--- +title: "Universal Data Graph" +description: "How to Configure data graph" +keywords: "UDG, Universal Data Graph, Datasource, Concepts, Arguments, Field Mapping, Header Management, Graphql, Kafka, Rest, Examples" +sidebarTitle: "Universal Data Graph (UDG)" +--- + +## Overview + +The Universal Data Graph (UDG) lets you combine multiple APIs into one universal interface. +With the help of GraphQL you're able to access multiple APIs with a single query. + +It's important to note that you don't even have to build your own GraphQL server. +If you have existing REST APIs all you have to do is configure the UDG. + +With the Universal Data Graph Tyk becomes your central integration point for all your internal as well as external APIs. +In addition to this, the UDG benefits from all existing solutions that already come with your Tyk installation. +That is, your Data Graph will be secure from the start and there's a large array of middleware you can build on to power your Graph. + +Universal Datagraph Overview + +Currently supported DataSources: +- REST +- GraphQL +- SOAP (through the REST datasource) +- Kafka + + + + + To start creating your first Universal Data Graph in Tyk Dashboard, go to "Data Graphs" section of the menu. 
+ + + +Make sure to check some of the resources to help you start: +- [How to create UDG schema](/api-management/data-graph#creating-schema) +- [How to connect data sources](/api-management/data-graph#connect-datasource) +- [How to secure the data graph](/api-management/data-graph#security) + +## Key Concepts + +### Universal Data Graph + +The Universal Data Graph (UDG) introduces a few concepts you should fully understand in order to make full use of it. + +UDG comes with a fully spec compliant GraphQL engine that you don't have to code, you just have to configure it. + +For that you have to define your "DataSources" and might want to add "Field Mappings" as well as "Arguments" to your configuration. +Read on in the sub sections to understand the full picture to use UDG to its full potential. + +To help you, we have put together the following video. + + + +### DataSources + +In most GraphQL implementations you have the concept of Resolvers. +Resolvers are functions that take optional parameters and return (resolve) some data. +Each resolver is attached to a specific type and field. + +DataSources are similar in that they are responsible for loading the data for a certain field and type. +The difference is that with DataSources you simply configure how the engine should fetch the data whereas with traditional GraphQL frameworks you have to implement the function on your own. + +DataSources can be internal as well as external. + +Internal DataSources are APIs that are already managed by Tyk, such as REST or SOAP services configured through the Dashboard. +You can take advantage of Tyk’s rich middleware ecosystem to validate and transform requests and responses for these internal DataSources. + +External DataSources are APIs that you’re not currently managing through Tyk. +For simplicity, you can add them to your data graph without first configuring them as dedicated APIs in Tyk. 
+If you later decide to apply middleware or other policies, you can easily transition an external DataSource into a managed internal API. + +Head over to the [connect data source](/api-management/data-graph#udg) section to learn about the supported data sources and how to connect them to Tyk. + +### Arguments + +Looking back at the example from the "Field Mappings", you might wonder how to use the "id" argument from the GraphQL query to make the correct REST API call to the user service. + +Here's the schema again: + +```graphql +type Query { + user(id: Int!): User +} + +type User { + id: Int! + name: String +} +``` + +We assume you already have your DataSource attached and now want to configure it so that the path argument gets propagated accordingly. +You need to tell the GraphQL engine that when it comes to resolving the field "user", take the argument with the name "id" and use it in the URL to make the request to the REST API. +You do this by using templating syntax to inject it into the URL. +This is done from the "Configure data source" tab, which will show after clicking a schema argument or object field. +Typing an opening curly brace ( `{` ) will produce a dropdown that contains all available fields and arguments. + +```html +https://example.com/user/{{ .arguments.id }} +``` + +Create New API + +### Field Mappings + +Universal Data Graph can automatically resolve where data source information should go in the GraphQL response as long as the GraphQL schema mirrors the data source response structure. + +Let's assume you have a REST API with a user resource like this: `http://example.com/users/:id` + +The following is an example response: + +```json +{ + "id": 1, + "name": "Martin Buhr" +} +``` + +If GraphQL schema in UDG is set as the following: +```graphql +type Query { + user(id: Int!): User +} + +type User { + id: Int! 
+  name: String
+}
+```
+and REST data source is attached behind `user(id: Int!)` query, UDG will be able to automatically resolve where `id` and `name` values should be in UDG response. In this case no field mapping is necessary.
+
+
+
+GraphQL does not support field names with hyphens (e.g. `"user-name"`). This can be resolved by using field mappings as described below.
+
+
+
+Let's assume that the JSON response looked a little different:
+
+````json
+{
+  "id": 1,
+  "user_name": "Martin Buhr"
+}
+````
+
+If this were the JSON response you received from the REST API, you must modify the path for the field "name".
+This is achieved by unchecking the "Disable field mapping" checkbox and setting the Path to "user_name".
+
+Nested paths can be defined using a period ( . ) to separate each segment of the JSON path, *e.g.*, "name.full_name"
+
+In cases where the JSON response from the data source is wrapped with `[]` like this:
+
+```json
+[
+  {
+    "id": 1,
+    "name": "Martin Buhr",
+    "phone-number": "+12 3456 7890"
+  }
+]
+```
+UDG will not be able to automatically parse `id`, `name` and `phone-number` and field mapping needs to be used as well. To get the response from inside the brackets the following syntax has to be used in field mapping: `[0]`.
+
+It is also possible to use this syntax for nested paths. For example: `[0].user.phone-number`
+
+#### Field mapping in Tyk Dashboard
+
+See below how to configure the field mapping for each individual field.
+
+Field mapping UI
+
+
+#### Field mapping in Tyk API definition
+
+If you're working with raw Tyk API definition the field mapping settings look like this:
+
+```json
+{"graphql": {
+    "engine": {
+      "field_configs": [
+        {
+          "type_name": "User",
+          "field_name": "phoneNumber",
+          "disable_default_mapping": false,
+          "path": [
+            "[0]",
+            "user",
+            "phone-number"
+          ]
+        }
+      ]
+    }
+  }
+}
+```
+
+Notice that even though in Tyk Dashboard the nested path has a syntax with ( . 
), in Tyk API definition it becomes an array of strings. + +There's more UDG concepts that would be good to understand when using it for the first time: +* [UDG Arguments](/api-management/data-graph#arguments) +* [UDG Datasources](/api-management/data-graph#udg) + +### Reusing response fields + +When using the UDG, there may be a situation where you want to access an API with data coming from another API. +Consider the following REST APIs: + + - REST API for people: `https://people-api.dev/people` + - REST API for a specific person: `https://people-api.dev/people/{person_id}` + - REST API for driver licenses: `https://driver-license-api.dev/driver-licenses/{driver_license_id}` + +The REST API for a person will give us the following response: +```json +{ + "id": 1, + "name": "John Doe", + "age": 40, + "driverLicenseID": "DL1234" +} +``` + +And the REST API response for driver licenses looks like this: +```json +{ + "id": "DL1234", + "issuedBy": "United Kingdom", + "validUntil": "2040-01-01" +} +``` + +As you can see by looking at the example responses, you could use the `driverLicenseID` from the People API to obtain the driver license data from the Driver License API. + +You also want to design the schema so that it represents the relationship between a person and a driver license. +As the person object is referencing a driver license by its ID, it means that we will need to define the driver license inside the person object as a field. +Consequently, a schema representing such a relationship might look like this: + +```graphql +type Query { + people: [Person] # Data source for people + person(id: Int!): Person # Data Source for a specific person +} + +type Person { + id: Int! + name: String! + age: Int! + driverLicenseID: ID + driverLicense: DriverLicense # Data Source for a driver license +} + +scalar Date + +type DriverLicense { + id: ID! + issuedBy: String! + validUntil: Date! 
+} +``` + +#### Defining the data source URLs + +Now it's all about defining the data source URLs. + +For the field `Query.people`, you can simply use the URL to the API: +``` +https://people-api.dev/people +``` + +The `Query.person` field needs to use its `id` argument to call the correct API endpoint. + +See [Concept: Arguments](/api-management/data-graph#arguments) to learn more about it. + ``` + https://people-api.dev/people/{{.arguments.id}} + ``` + +To retrieve the driver license data you need to be able to use the `driverLicenseID` from the `Person` object. As we defined the driver license data source on the `Person` object, you can now access all properties from the `Person` object by using the `.object` placeholder. + + + +If you want to access data from the object on which the data source is defined, use the `.object` placeholder (e.g: `.object.id` to access the `id` property from an object). + + + +So the URL for the driver license data source would look like this: +``` +https://driver-license-api.dev/driver-licenses/{{.object.driverLicenseID}} +``` + Use the object placeholder + +#### Result + +A query like: +```graphql +{ + people { + id + name + age + driverLicense { + id + issuedBy + validUntil + } + } +} +``` + +... will now result in something like this: +```json +{ + "data": { + "people": [ + { + "id": 1, + "name": "John Doe", + "age": 40, + "driverLicense": { + "id": "DL1234", + "issuedBy": "United Kingdom", + "validUntil": "2040-01-01" + } + }, + { + "id": 2, + "name": "Jane Doe", + "age": 30, + "driverLicense": { + "id": "DL5555", + "issuedBy": "United Kingdom", + "validUntil": "2035-01-01" + } + } + ] + } +} +``` + + +### Header management + +With Tyk v5.2 the possibilities of managing headers for Universal Data Graph and all associated data sources have been extended. + +#### Global headers for UDG + +Global headers can be configured via Tyk API Definition. The correct place to do that is within `graphql.engine.global_headers` section. 
For example: + +```json +{ + "graphql": { + "engine": { + "global_headers": [ + { + "key": "global-header", + "value": "example-value" + }, + { + "key": "request-id", + "value": "$tyk_context.request_id" + } + ] + } + } +} +``` + +Global headers now have access to all [request context variables](/api-management/traffic-transformation/request-context-variables). + +By default, any header that is configured as a global header, will be forwarded to all data sources of the UDG. + +#### Data source headers + +Data source headers can be configured via Tyk API Definition and via Tyk Dashboard UI. The correct place to do that is within `graphql.engine.datasources.config.headers` section. For example: + +```json +{ + "engine": { + "data_sources": [ + { + "config": { + "headers": { + "data-source-header": "data-source-header-value", + "datasource1-jwt-claim": "$tyk_context.jwt_claims_datasource1" + } + } + } + ] + } +} +``` + +Data source headers now have access to all [request context variables](/api-management/traffic-transformation/request-context-variables). + +#### Headers priority order + +If a header has a value at the data source and global level, then the data source value takes precedence. + +For example for the below configuration: + +```json +{ + "engine": { + "data_sources": [ + { + "config": { + "headers": { + "example-header": "data-source-value", + "datasource1-jwt-claim": "$tyk_context.jwt_claims_datasource1" + } + } + } + ], + "global_headers": [ + { + "key": "example-header", + "value": "global-header-value" + }, + { + "key": "request-id", + "value": "$tyk_context.request_id" + } + ] + } +} +``` + +The `example-header` header name is used globally and there is also a data source level header, with a different value. 
Value `data-source-value` will take priority over `global-header-value`, resulting in the following headers being sent to the data source: + +| Header name | Header value | Defined on level | +| :---------------- | :------------------------------------- | :------------------ | +| example-header | data-source-value | data source | +| datasource1 | $tyk_context.jwt_claims_datasource1 | data source | +| request-id | $tyk_context.request_id | global | + +## Connect Data Sources + +### UDG + +Datasources are the fuel to power any Unified Data Graph and the designed schema. + +Datasources can be attached to any field available in the composed UDG schema. They can also be nested within each other. + +You can add Datasources to your Universal Data Graph without adding them to Tyk as a dedicated API. This is useful for getting started but also limited in capabilities. Datasources that are managed within Tyk offer much more flexibility and allow for a much fuller API Management control. + +If you want to add quotas, rate limiting, body transformations etc. to a REST Datasource it is recommended to first import the API to Tyk. + +Supported DataSources: +- REST +- GraphQL +- SOAP (through the REST DataSource) +- Kafka + +### GraphQL + +The GraphQL Datasource is able to make GraphQL queries to your upstream GraphQL service. In terms of configuration there are no real differences between the GraphQL Datasource and the one for REST with one slight exception. + +#### GraphQL data source at operation root level + +To illustrate this we'll have a look at an example graph. + +Consider the following schema: + +```graphql +type Query { + employee(id: Int!): Employee +} +type Employee { + id: Int! + name: String! 
+} +``` + +Let's assume we would send the following query to a GraphQL server running this schema: + +```graphql +query TykCEO { + employee(id: 1) { + id + name + } +} +``` + +The response of this query would look like this: + +```json +{ + "data": { + "employee": { + "id": 1, + "name": "Martin Buhr" + } + } +} +``` + +Compared to a REST API one difference is obvious. The response is wrapped in the root field "data". +There's also the possibility of having a root field "errors" but that's another story. +For simplicity reasons the GraphQL Datasource will not return the "data" object but rather extract the "employee" object directly. +So if you want to get the field mappings right you don't have to think about errors or data. +You can assume that your response object looks like this: + +````json +{ + "employee": { + "id": 1, + "name": "Martin Buhr" + } +} +```` + +Compared to a REST API you should be able to identify the key difference here. +The response is wrapped in the field "employee" whereas in a typical REST API you usually don't have this wrapping. + +Because of this, field mappings are by default disabled for REST APIs. +For GraphQL APIs, the mapping is enabled by default and the path is set to the root field name. + +Create New API + +Other than this slight difference what's so special about the GraphQL Datasource to give it a dedicated name? + +The GraphQL Datasource will make specification-compliant GraphQL requests to your GraphQL upstream. When you attach a GraphQL Datasource to a field the Query planner of the Tyk GraphQL engine will collect all the sub fields of a root field in order to send the correct GraphQL query to the upstream. This means you can have multiple GraphQL and REST APIs side by side in the same schema, even nested, and the query planner will always send the correct query/request to each individual upstream to fetch all the data required to return a query response. 
+
+**How does the query planner know which Datasource is responsible for a field?**
+
+When the query planner enters a field it will check if there is a Datasource attached to it.
+If that's the case this Datasource will be responsible for resolving this field.
+If there are multiple nested fields underneath this root field they will all be collected and provided to the root field Datasource.
+
+If however, one of the nested fields has another Datasource attached, ownership of the Datasource will shift to this new "root" field.
+After leaving this second root field ownership of the Datasource for resolving fields will again shift back to the first Datasource.
+
+#### GraphQL data source at type/field level
+
+In case you want to add GraphQL data source at a lower level of your schema - type/field - the configuration steps are as follows:
+
+1. Navigate to the field you want the GraphQL data source to be connected to and click on it.
+2. From the right-hand side menu choose **GraphQL | Tyk** or **External GraphQL** depending on whether your data source was previously created in Tyk or if it's an external service.
+Provide a data source name and URL.
+
+Above steps are explained in detail in our [Getting started pages](/api-management/data-graph#connect-datasource).
+
+
+4. Tick the box next to `Add GraphQL operation` to see additional configuration fields. This will allow you to provide a query that will execute against the data source.
+5. Write the query in the `Operation` box and if you're using any variables provide those in `Variables` box.
+
+
+
+
+    You can use objects from your Data Graph schema as variables by referring to them using this syntax: `{{.object.code}}`
+
+
+
+
+Add GQL Operation
+
+### Kafka
+
+The Kafka DataSource is able to subscribe to Kafka topics and query the events with GraphQL.
+
+
+The Kafka DataSource utilizes consumer groups to subscribe to the given topics, and inherits all behavior of the consumer group concept. 
+ +Consumer groups are made up of multiple cooperating consumers, and the membership of these groups can change over time. Users can easily add a new consumer to the group to scale the processing load. A consumer can also go offline either for planned maintenance or due to an unexpected failure. Kafka maintains the membership of each group and redistributes work when necessary. + +When multiple consumers are subscribed to a topic and belong to the same consumer group, each consumer in the group will receive messages from a different subset of the partitions in the topic. You should know that if you add more consumers to a single group with a single topic than you have partitions, some consumers will be idle and get no messages. + +#### Basic Configuration + +You can find the full documentation for Kafka DataSource configuration here. + +**broker_addresses** +In order to work with the Kafka DataSource, you first need a running Kafka cluster. The configuration takes a list of known broker addresses and discovers the rest of the cluster. + +``` bash +{ + "broker_addresses": ["localhost:9092"] +} +``` + +**topics** +The Kafka DataSource is able to subscribe to multiple topics at the same time but you should know that the structs of events have to match the same GraphQL schema. + +```bash +{ + "topics": ["product-updates"] +} +``` + +**group_id** +As mentioned earlier, the Kafka DataSource utilizes the consumer group concept to subscribe to topics. We use the `group_id` field to set the consumer group name. + +```bash +{ + "group_id": "product-updates-group" +} +``` + +Multiple APIs can use the same `group_id` or you can run multiple subscription queries using the same API. Please keep in mind that the Kafka DataSource inherits all behaviors of the consumer group concept. + +**client_id** +Finally, we need the `client_id` field to complete the configuration. 
It is a user-provided string that is sent with every request to the brokers for logging, debugging, and auditing purposes. + +```bash +{ + "client_id": "tyk-kafka-integration" +} +``` + +Here is the final configuration for the Kafka DataSource: + +```bash +{ + "broker_addresses": ["localhost:9092"], + "topics": ["product-updates"], + "group_id": "product-updates-group", + "client_id": "tyk-kafka-integration" +} +``` + +The above configuration object is just a part of the API Definition Object of Tyk Gateway. + +#### Kafka Datasource configuration via Dashboard + +1. Click on the field which should have Kafka datasource attached + +2. From the right-hand side *Configure data source* panel choose KAFKA at the bottom in the *Add a new external data source* section + +Kafkaconfig + +3. Provide datasource name, broker address (at least 1), topics (at least 1), groupID, clientID. Optionally you can also choose Kafka version, balance strategy and field mapping options. + +4. Click *SAVE* button to persist the configuration. + +Once done the field you just configured will show information about data source type and name: + +KafkaList + +##### Subscribing to topics + +The `Subscription` type always defines the top-level fields that consumers can subscribe to. Let's consider the following definition: + +```bash +type Product { + name: String + price: Int + inStock: Int +} + +type Subscription { + productUpdated: Product +} +``` + +The `productUpdated` field will be updated each time a product is updated. Updating a product means a `price` or `inStock` fields of `Product` are updated and an event is published to a Kafka topic. Consumers can subscribe to the `productUpdated` field by sending the following query to the server: + +```bash +subscription Products { + productUpdated { + name + price + inStock + } +} +``` + +You can use any GraphQL client that supports subscriptions. 
+ +##### Publishing events for testing + +In order to test the Kafka DataSource, you can publish the following event to `product-updates` topic: + +```bash +{ + "productUpdated": { + "name": "product1", + "price": 1624, + "inStock": 219 + } +} +``` + +You can use any Kafka client or GUI to publish events to `product-updates`. + +When you change any of the fields, all subscribers of the `productUpdated`kafk field are going to receive the new product info. + +The result should be similar to the following: + +API Menu + + +##### API Definition for the Kafka DataSource + +The Kafka DataSource configuration: + +```bash +{ + "kind": "Kafka", + "name": "kafka-consumer-group", + "internal": false, + "root_fields": [{ + "type": "Subscription", + "fields": [ + "productUpdated" + ] + }], + "config": { + "broker_addresses": [ + "localhost:9092" + ], + "topics": [ + "product-updates" + ], + "group_id": "product-updates-group", + "client_id": "tyk-kafka-integration" + } +} +``` + +Here is a sample API definition for the Kafka DataSource. 
+ +```bash +{ + "created_at": "2022-09-15T16:19:07+03:00", + "api_model": {}, + "api_definition": { + "api_id": "7ec1a1c117f641847c5adddfdcd4630f", + "jwt_issued_at_validation_skew": 0, + "upstream_certificates": {}, + "use_keyless": true, + "enable_coprocess_auth": false, + "base_identity_provided_by": "", + "custom_middleware": { + "pre": [], + "post": [], + "post_key_auth": [], + "auth_check": { + "name": "", + "path": "", + "require_session": false, + "raw_body_only": false + }, + "response": [], + "driver": "", + "id_extractor": { + "extract_from": "", + "extract_with": "", + "extractor_config": {} + } + }, + "disable_quota": false, + "custom_middleware_bundle": "", + "cache_options": { + "cache_timeout": 60, + "enable_cache": true, + "cache_all_safe_requests": false, + "cache_response_codes": [], + "enable_upstream_cache_control": false, + "cache_control_ttl_header": "", + "cache_by_headers": [] + }, + "enable_ip_blacklisting": false, + "tag_headers": [], + "jwt_scope_to_policy_mapping": {}, + "pinned_public_keys": {}, + "expire_analytics_after": 0, + "domain": "", + "openid_options": { + "providers": [], + "segregate_by_client": false + }, + "jwt_policy_field_name": "", + "enable_proxy_protocol": false, + "jwt_default_policies": [], + "active": true, + "jwt_expires_at_validation_skew": 0, + "config_data": {}, + "notifications": { + "shared_secret": "", + "oauth_on_keychange_url": "" + }, + "jwt_client_base_field": "", + "auth": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "check_host_against_uptime_tests": false, + "auth_provider": { + "name": "", + "storage_engine": "", + "meta": {} + }, + 
"blacklisted_ips": [], + "graphql": { + "schema": "type Product {\n name: String\n price: Int\n inStock: Int\n}\n\ntype Query {\n topProducts(first: Int): [Product]\n}\n\ntype Subscription {\n productUpdated: Product\n}", + "enabled": true, + "engine": { + "field_configs": [{ + "type_name": "Query", + "field_name": "topProducts", + "disable_default_mapping": false, + "path": [ + "topProducts" + ] + }, + { + "type_name": "Subscription", + "field_name": "productUpdated", + "disable_default_mapping": false, + "path": [ + "productUpdated" + ] + } + ], + "data_sources": [{ + "kind": "GraphQL", + "name": "topProducts", + "internal": false, + "root_fields": [{ + "type": "Query", + "fields": [ + "topProducts" + ] + }], + "config": { + "url": "http://localhost:4002/query", + "method": "POST", + "headers": {}, + "default_type_name": "Product" + } + }, + { + "kind": "Kafka", + "name": "kafka-consumer-group", + "internal": false, + "root_fields": [{ + "type": "Subscription", + "fields": [ + "productUpdated" + ] + }], + "config": { + "broker_addresses": [ + "localhost:9092" + ], + "topics": [ + "product-updates" + ], + "group_id": "product-updates-group", + "client_id": "tyk-kafka-integration" + } + } + ] + }, + "type_field_configurations": [], + "execution_mode": "executionEngine", + "proxy": { + "auth_headers": { + "Authorization": "Bearer eyJvcmciOiI2MWI5YmZmZTY4OGJmZWNmZjAyNGU5MzEiLCJpZCI6IjE1ZmNhOTU5YmU0YjRmMDFhYTRlODllNWE5MjczZWZkIiwiaCI6Im11cm11cjY0In0=" + } + }, + "subgraph": { + "sdl": "" + }, + "supergraph": { + "subgraphs": [], + "merged_sdl": "", + "global_headers": {}, + "disable_query_batching": false + }, + "version": "2", + "playground": { + "enabled": false, + "path": "/playground" + }, + "last_schema_update": "2022-09-15T16:45:42.062+03:00" + }, + "hmac_allowed_clock_skew": -1, + "dont_set_quota_on_create": false, + "uptime_tests": { + "check_list": [], + "config": { + "expire_utime_after": 0, + "service_discovery": { + "use_discovery_service": false, + 
"query_endpoint": "", + "use_nested_query": false, + "parent_data_path": "", + "data_path": "", + "cache_timeout": 60 + }, + "recheck_wait": 0 + } + }, + "enable_jwt": false, + "do_not_track": false, + "name": "Kafka DataSource", + "slug": "kafka-datasource", + "analytics_plugin": {}, + "oauth_meta": { + "allowed_access_types": [], + "allowed_authorize_types": [], + "auth_login_redirect": "" + }, + "CORS": { + "enable": false, + "max_age": 24, + "allow_credentials": false, + "exposed_headers": [], + "allowed_headers": [ + "Origin", + "Accept", + "Content-Type", + "X-Requested-With", + "Authorization" + ], + "options_passthrough": false, + "debug": false, + "allowed_origins": [ + "*" + ], + "allowed_methods": [ + "GET", + "POST", + "HEAD" + ] + }, + "event_handlers": { + "events": {} + }, + "proxy": { + "target_url": "", + "service_discovery": { + "endpoint_returns_list": false, + "cache_timeout": 0, + "parent_data_path": "", + "query_endpoint": "", + "use_discovery_service": false, + "_sd_show_port_path": false, + "target_path": "", + "use_target_list": false, + "use_nested_query": false, + "data_path": "", + "port_data_path": "" + }, + "check_host_against_uptime_tests": false, + "transport": { + "ssl_insecure_skip_verify": false, + "ssl_min_version": 0, + "proxy_url": "", + "ssl_ciphers": [] + }, + "target_list": [], + "preserve_host_header": false, + "strip_listen_path": true, + "enable_load_balancing": false, + "listen_path": "/kafka-datasource/", + "disable_strip_slash": true + }, + "client_certificates": [], + "use_basic_auth": false, + "version_data": { + "not_versioned": true, + "default_version": "", + "versions": { + "Default": { + "name": "Default", + "expires": "", + "paths": { + "ignored": [], + "white_list": [], + "black_list": [] + }, + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [], + "transform": [], + "transform_response": [], + "transform_jq": [], + "transform_jq_response": [], + 
"transform_headers": [], + "transform_response_headers": [], + "hard_timeouts": [], + "circuit_breakers": [], + "url_rewrites": [], + "virtual": [], + "size_limits": [], + "method_transforms": [], + "track_endpoints": [], + "do_not_track_endpoints": [], + "validate_json": [], + "internal": [] + }, + "global_headers": {}, + "global_headers_remove": [], + "global_response_headers": {}, + "global_response_headers_remove": [], + "ignore_endpoint_case": false, + "global_size_limit": 0, + "override_target": "" + } + } + }, + "jwt_scope_claim_name": "", + "use_standard_auth": false, + "session_lifetime": 0, + "hmac_allowed_algorithms": [], + "disable_rate_limit": false, + "definition": { + "enabled": false, + "name": "", + "default": "", + "location": "header", + "key": "x-api-version", + "strip_path": false, + "strip_versioning_data": false, + "versions": {} + }, + "use_oauth2": false, + "jwt_source": "", + "jwt_signing_method": "", + "jwt_not_before_validation_skew": 0, + "use_go_plugin_auth": false, + "jwt_identity_base_field": "", + "allowed_ips": [], + "request_signing": { + "is_enabled": false, + "secret": "", + "key_id": "", + "algorithm": "", + "header_list": [], + "certificate_id": "", + "signature_header": "" + }, + "org_id": "630899e6688bfe5fd6bbe679", + "enable_ip_whitelisting": false, + "global_rate_limit": { + "rate": 0, + "per": 0 + }, + "protocol": "", + "enable_context_vars": false, + "tags": [], + "basic_auth": { + "disable_caching": false, + "cache_ttl": 0, + "extract_from_body": false, + "body_user_regexp": "", + "body_password_regexp": "" + }, + "listen_port": 0, + "session_provider": { + "name": "", + "storage_engine": "", + "meta": {} + }, + "auth_configs": { + "authToken": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + 
"allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "basic": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "coprocess": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "hmac": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "jwt": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "oauth": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + 
"use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + }, + "oidc": { + "disable_header": false, + "auth_header_name": "Authorization", + "cookie_name": "", + "name": "", + "validate_signature": false, + "use_param": false, + "signature": { + "algorithm": "", + "header": "", + "use_param": false, + "param_name": "", + "secret": "", + "allowed_clock_skew": 0, + "error_code": 0, + "error_message": "" + }, + "use_cookie": false, + "param_name": "", + "use_certificate": false + } + }, + "strip_auth_data": false, + "id": "6323264b688bfe40b7d71ab3", + "certificates": [], + "enable_signature_checking": false, + "use_openid": false, + "internal": false, + "jwt_skip_kid": false, + "enable_batch_request_support": false, + "enable_detailed_recording": false, + "scopes": { + "jwt": {}, + "oidc": {} + }, + "response_processors": [], + "use_mutual_tls_auth": false + }, + "hook_references": [], + "is_site": false, + "sort_by": 0, + "user_group_owners": [], + "user_owners": [] +} +``` + +### REST + +The REST Datasource is a base component of UDG to help you add existing REST APIs to your data graph. By attaching a REST datasource to a field the engine will use the REST resource for resolving. + +We have a video which demoes this functionality for you. + + + +#### Using external REST API as a Datasource + +In order to use an external REST API as a Datasource you need to first navigate to the field which that Datasource should be attached to. + +1. Click on the field which should have a datasource attached +2. From the right-hand side *Configure data source* panel choose REST at the bottom in the *Add a new external data source* section + +ExternalREST + +3. Provide data source name, URL, method to be used. 
Optionally you can add headers information and configure field mapping + +ExternalRESTdetail + +4. Click the *Save & Update API* button to persist the configuration and generate a REST resolver, to resolve this field at runtime. + +#### Using Tyk REST API as a Datasource + +1. Click on the field which should have a datasource attached +2. From the right-hand side *Configure data source* panel choose *REST | Tyk* dropdown to see all available APIs + +InternalREST + +3. Choose which Tyk REST API you want to attach +4. Provide data source name, endpoint and method to be used. Optionally you can add headers information and configure field mapping + +InternalRESTdetail + +5. Click the *Save & Update API* button to persist the configuration and generate a REST resolver, to resolve this field at runtime. + +Once done the field you just configured will show information about data source type and name: + +datasourcesList + +#### Automatically creating REST UDG configuration based on OAS specification + +Tyk Dashboard users have an option to use Tyk Dashboard API and quickly transform REST API OAS specification into a UDG config and have it published in the Dasboard within seconds. + +See our [Postman collections](https://www.postman.com/tyk-technologies/workspace/tyk-public-workspace/overview) and fork `Tyk Dashboard API v5.1`. + +The endpoint you need to use is: + +```bash +POST /api/data-graphs/data-sources/import +``` + +Request body: + +```json +{ + "type": "string", + "data": "string" +} +``` + +`type` is an enum with the following possible values: + +- openapi +- asyncapi + +To import an OAS specification you need to choose `openapi`. + +If you are using Postman and your OAS document is in `yaml` format you can use a simple pre-request script to transform it into a `string`. 
+ +```bash +pm.environment.set("oas_document", JSON.stringify(``)) +``` + +Then your request body will look like this: + +```json +{ + "type": "openapi", + "data": {{oas_document}} +} +``` + +### Tyk + +Tyk DataSources are exactly the same as GraphQL or REST DataSources. + +The only difference is that you can directly choose an endpoint from your existing APIs using a drop-down. +This makes it easier to set up and prevents typos compared to typing in the URL etc. + +From a technical perspective there's another difference: + +Tyk DataSources make it possible to call into existing APIs on a Tyk Gateway, even if those are marked as internal. +They also add a lot of flexibility as you can add custom middleware, AuthZ as well as AuthN, rate limits, quotas etc. to these. + +In general, it is advised to first add all APIs you'd wish to add to a data graph as a dedicated API to Tyk. +Then in a second step you'd add these to your data graph. + +Then in a second step you'd add these to your data graph. + + + +As of `v3.2.0` internal datasorces (`TykRESTDataSource` and `TykGraphQLDataSource`) will be deprecated at the API level. Please use `HTTPJSONDataSource` or `GraphQLDataSource` respectively. + + + +## Getting Started + +### Overview + + + +In this getting started tutorial we will combine 2 different HTTP services (Users and Reviews) into one single unified UDG API. Instead of querying these two services separately (and probably merging their responses later) we'll use UDG to get result from both the API's in one single response. 
+ +#### Prerequisites + +- Access to Tyk Dashboard +- Node.JS v.13^ (only to follow this example) + +#### Running example services locally + + + +Clone repo + +```bash +git clone https://github.com/jay-deshmukh/example-rest-api-for-udg.git +``` + +Run it locally +```bash +cd example-rest-api-for-udg +``` + +```bash +npm i +``` + +```bash +npm run build +``` + +```bash +npm start +``` + +You should see following in your terminal + +``` +Users Service Running on http://localhost:4000 +Review service running on http://localhost:4001 +``` + +
+ +Now that we have Users service running on port `4000` and Reviews service running on port `4001` let's see how we can combine these two into one single UDG API in following tutorial. + + +### Creating Schema + + + +1. Create API + +To start with a Universal Data Graph from scratch head over to the dashboard and click on β€œAPIs” in the left menu. Then click the `β€œAdd New API”` and `UDG`. You might want to give your Universal Data Graph an individual name (i.e. `User-Reviews-Demo`) + + +2. Set Authentication + +To get started easily we'll set the API to `Keyless(Open)`. To do this, scroll down to the Authentication section. + + + +The API authentication is set to Keyless for demo purposes, it’s not recommended to use this setting in production, we’ll explore how to secure the UDG later in this guide. + + + +3. Configure Schema + +Switch to schema tab in your designer and you should already see a default schema. We will edit the schema as follows to connect with our datasources later. + +```gql +type Mutation { + default: String +} + +type Query { + user(id: String): User +} + +type Review { + id: String + text: String + userId: String + user: User +} + +type User { + id: String + username: String + reviews: [Review] +} + +``` + +You can also import an existing schema using the import feature, file types supported : `gql` , `graphql` and `graphqls`. + +4. Save + +Click on save button and that should create our first UDG API + +
+ +Now if we try to query our UDG API it should error at this moment as we do not have any data-source attached to it, let's see how we can do that in next section. + +### Connect Datasource + + + +Upon navigating to schema tab on API details page you’ll see a split screen view with schema and user interface for available fields to configure the datasource. + +You can attach datasource to each individual field and can also re-use the datasource for multiple fields for performance benefits in case it has similar configuration (it needs to use the same upstream URL and method). + +We will start with attaching datasource to user query using following approach. + +#### 1. Select field to attach datasource. +Upon selecting the `Users` field on type `Query`, you'll see the options to configure that field for following kinds of datasources. + +* REST +* GraphQL +* Kafka + +#### 2. Select datasource type. + +Since our upstream services are REST, we'll select REST as datasource type but other kind of datasources can be used as well: + +* *Use external data source*: Will allow to configure the field to resolve with the external API (outside Tyk environment) +* *Using exiting APIs*: Which will allow to configure the field with the API that already exists in Tyk environment. +* *Re-use already configured data source*: If you already have configured a data source for the same API you can re-use the same data-source. If the data source is reused the endpoint will only be called once by Tyk. + +You can learn more about it [here](#udg) + +#### 3. Configure datasource details. + +Configure the data source with the following fields + +**Name** + + Enter a unique datasource name configuration to reuse it in the future. We will name this as `getUserById` for the given example. +When configuring a datasource name with Tyk Dashboard, a default name is created automatically by concatenating the field name and the GraphQL type name with an underscore symbol in between. 
For example, _getUserById_Query_. This name is editable and can be changed by the user. + +**URL** + +We will use the URL for our `Users` service which returns details of an user for given `id` i.e `http://localhost:4000/users/:id`. + +To dynamically inject the `id` for every request made, we can use templating syntax and inject `id` with user supplied argument or we can also use session object. + +To avoid typos in template you can use the UI component to automatically create a template for you. You can select from the available argument and object template options from the list generated by input component which is triggered by entering `{` in input. + +To learn more about arguments click [here](#arguments) + +To learn more about reusing response fields click [here](#reusing-response-fields) + +#### 4. Enter datasource name. + +Enter a unique datasource name your configuration to reuse it in the future. We will name this as `getUserById` for the given example + +#### 5. Select HTTP method for the URL. + +You can select the HTTP method for your upstream url. Which should be `GET` in our case. + +#### 6. Add headers (Optional) + +If you upstream expects headers, you can supply them using this. +You can also use templating syntax here to reuse request headers. + +#### 7. Select field mapping + +Keep the field mapping disabled by default. +You can use field mapping to map the API response with your schema. + +You can learn more about field mapping [here](#field-mappings) + +#### 8. Save data source + +It is important to save the datasource configuration in order to reflect the changes in your API definition. +The` β€œSave & Update API” `button will persist the full API definition. + +#### 9. Update API and Test + +Click Update the API. 
+ +You can now query your UDG API of `user` using the Playground tab in API designer + +```gql +query getUser { + user(id:"1"){ + username + id + reviews { + id + text + user { + id + } + + } + } +} +``` + +The above query should return the response as follows + +```json +{ + "data": { + "user": { + "username": "John Doe", + "id": "1", + "reviews": null + } + } +} +``` + +#### Challenge + +1. Try to resolve `reviews` field on type `Users` +2. Try to resolve `users` field on type `Reviews` + +As you can see our query resolved for user details but returns `null` for `reviews`. + +This happens because we haven't defined datasource on field level for `reviews` on type `User`. + +``` +Notes +- For reviews field on type User +- - Description :: get reviews by userId +- - URL :: http://localhost:4001/reviews/:userId +- - Method :: GET + +- For users field on type Review +- - Description :: get user details by Id +- - URL :: http://localhost:4000/users/:userId +- - Method :: GET + +- You can reuse response filed using templating syntax example `{{.object.id}}` +``` + + + +You can find the solution for the challenge in the above video. + + + +
+ +Now that we have linked datasources for our queries, let's see how we can do the same for mutations in the next section. + + +### Mutations + + + +Now that we have attached datasources to our `Query` in the schema let's try to do the same for `Mutation`. + +#### Steps for Configuration + +1. **Update Schema** + + ```gql + type Mutation { + addReview(text: String, userId: String): Review + deletReview(reviewId: String): String + } + ``` + + We’ll update the Mutatation type as above where we’ll add two operations + + * `addReview`: Which accepts two `arguments` (i.e `text` and `userId`) and adds a new review by making a `POST` request to `http://localhost:4001/reviews` endpoint, which expects something like the following in the request payload + + ``` + { + "id": "1", // UserId of the user posting review + "text": "New Review by John Doe11" // review text + } + ``` + * `deleteReview`: Which accepts one `argument` (i.e `reviewId`), that deletes a review by making a `DELETE` request to `http://localhost:4001/reviews/:reviewId` + +2. **Configure datasource.** + + Follow these steps to configure a data source for the `Mutation`. + + * Navigate to schema tab in the api where you would see the split screen view of schema editor on left and list of configurable fields on right + * Select `addReview` field from `Mutation` type + * Select `REST` option + * Set a unique datasource name + * Set the URL as `http://localhost:4001/reviews` + * Select method type as `POST` + * Set request body to relay the graphql arguments to our upstream payload as follows: + + ``` + { + "text": "{{.arguments.text}}", + "userId": "{{.arguments.userId}}" + } + ``` + * Update the API + +3. 
**Execute mutation operation** + + We can now test our mutation operation with the playground in API designer using the following operation + + ```gql + mutation AddReview { + addReview(text: "review using udg", userId:"1"){ + id + text + } + } + ``` + + That should return us the following response: + + ```gql + { + "data": { + "addReview": { + "id": "e201e6f3-b582-4772-b95a-d25199b4ab82", + "text": "review using udg" + } + } + } + + ``` + + +#### Challenge + +Configure a datasource to delete a review using review id. + +``` +Notes + +- For users field on type Review +- - Description :: delete review using reviewId +- - URL :: http://localhost:4001/reviews/:reviewId +- - Method :: DELETE + +- Enable field mapping to map your API response + +``` + + +You can find the solution for the challenge in the above video. + + + +
+ +Now that we have a good idea how we could do CRUD operations with UDG APIs, let's see how we can secure them using policies + +### Security + + + +Due to the nature of graphql, clients can craft complex or large queries which can cause your upstream APIs to go down or have performance issues. + +Some of the common strategies to mitigate these risks include + +- Rate limiting +- Throttling +- Query depth limiting + + +For this tutorial we'll mitigate these risks using `Query Depth Limit` but you can also use common strategies like rate limiting and throttling, which you can read more about [here](/api-management/rate-limit) + +#### Steps for Configuration + +1. **Set authentication mode** + + In you Api designer core settings tab scroll down to Authentication section and set the authentication mode `Authentication Token` and update the API. + + Our API is not open and keyless anymore and would need appropriate Authentication token to execute queries. + +2. **Applying to query depth** + + Currently if users want they could run queries with unlimited depth as follows + + ```gql + query getUser { + user(id: "1") { + reviews { + user { + reviews { + user { + reviews { + user { + reviews { + user { + id + } + } + } + } + } + } + } + } + } + } + + ``` + + To avoid these kind of scenarios we will set query depth limit on the keys created to access this API. + + Although we can directly create keys by selecting this API but we'll use policy as it will make it easier to update keys for this API in future. You can read more about policies [here](/api-management/policies#what-is-a-security-policy) + + **Create Policy** + - Navigate to policies page + - Click Add Policy + - Select our API from Access Rights table + - Expand `Global Limits and Quota` section + - Unselect `Unlimited Query Depth` and set limit to `5` + - Switch to configuration tab + - Set policy name (eg. 
user-reviews-policy) + - Set expiration date for the keys that would be created using this policy + - Click on create policy + + **Create a key using above policy** + - Navigate to keys page + - Click Add Key + - Select our newly created policy + - Click create key + - Copy the key ID + + Now if you try to query our UDG API using the key you should see an error as follows + + ```json + { + "error": "depth limit exceeded" + } + ``` + + + +Watch the video above to see how you can use these policies to publish your UDG APIs on your portal with documentation and playground. + + + +### Field Based Permissions + + + +It is also possible to restrict user's based on fields using policies. For example you can create two policies + +1. For read-only access for users to only execute queries. + +2. For read and write access to run mutations and queries both. + +#### Creating keys with read-only access + +**Create Policy** + +- Navigate to policies page +- Click Add Policy +- Select our API from Access Rights table +- Expand Api panel under global section +- Toggle Field-Based Permissions and check Mutation +- Switch to configuration tab +- Set policy name (eg. user-reviews-policy-read-only) +- Set expiration date for the keys that would be created using this policy +- Click on create policy + +Now keys created using these policies cannot be used for mutations. + +### Header Forwarding + +**Min Version: Tyk v3.2.0** + +You’re able to configure upstream Headers dynamically, that is, you’re able to inject Headers from the client request into UDG upstream requests. For example, it can be used to access protected upstreams. + +The syntax for this is straight forward: + +``` +{{.request.headers.someheader}} +``` + + In your data sources, define your new Header name and then declare which request header's value to use: + + Forwarding Headers + + That's it! + + + +A JSON string has to be escaped before using as a header value. 
For example: +``` +{\"hello\":\"world\"} +``` + + + +### UDG Examples + +It is possible to import various UDG examples from the [official Tyk examples repository](https://github.com/TykTechnologies/tyk-examples). + +We offer 3 ways of importing an example into Tyk: + - Using [tyk-sync](/api-management/sync/use-cases#synchronize-api-configurations-with-github-actions) + - Manually import via [Dashboard API Import](/api-management/gateway-config-managing-classic#import-an-api) +- Using Tyk Dashboard to browse and import the examples directly + +#### Import via tyk-sync + +Please follow the [tyk-sync documentation](/product-stack/tyk-sync/commands#examples-publish-command) to learn more about this approach. + +#### Import via Tyk Dashboard API Import + +Navigate to an example inside the [examples repository](https://github.com/TykTechnologies/tyk-examples) and grab the relevant API definition from there. +Then you can move in the Dashboard UI to `APIs -> Import API` and select `Tyk API` as source format. + +Paste the API definition inside the text box and hit `Import API`. + +You can find more detailed instructions in the [Dashboard API Import documentation section](/api-management/gateway-config-managing-classic#import-an-api). + +#### Import via Tyk Dashboard UI + +Navigate to `Data Graphs` section of the Tyk Dashboard menu. If you haven't yet created any Universal Data Graphs you will see three options in the screen - one of them `Try example data graph` - will allow you to browse all examples compatible with your Dashboard version and choose the one you want to import. + +Examples in Dashboard + +In case you have created data graphs before and your screen looks different, just use the `Add Data Graph` button and in the next step decide if you want to create one yourself, or use one of the available examples. 
+ +Examples in Dashboard New Graph \ No newline at end of file diff --git a/api-management/event-driven-apis.mdx b/api-management/event-driven-apis.mdx new file mode 100644 index 000000000..4689441e6 --- /dev/null +++ b/api-management/event-driven-apis.mdx @@ -0,0 +1,1016 @@ +--- +title: "Tyk Streams – Manage Event-Driven APIs" +description: "Introduction to Tyk Streams" +keywords: "Tyk Streams, Glossary, Use Cases, Asynchronus APIs, Async, Configuration" +sidebarTitle: "Tyk Streams" +--- + +{/* ## TODO: Add availability */} + +## Overview + +*Tyk Streams* is a feature of the Tyk API management platform that enables organizations to securely expose, +manage and monetize real-time event streams and asynchronous APIs. + +With *Tyk Streams*, you can easily connect to event brokers and streaming platforms, such as +[Apache Kafka](https://github.com/TykTechnologies/tyk-pro-docker-demo/tree/kafka), and expose them as +managed API endpoints for internal and external consumers. + +
+Tyk Streams Overview +
+ +The purpose of Tyk Streams is to provide a unified platform for managing both synchronous APIs (such as REST and +GraphQL) and asynchronous APIs, in addition to event-driven architectures. This allows organizations to leverage the +full potential of their event-driven systems while maintaining the same level of security, control and visibility they +expect from their API management solution. + +### Why use Tyk Streams + +Tyk Streams is a powerful stream processing engine integrated into the Tyk API Gateway, available as part of the Enterprise Edition. It allows you to manage asynchronous APIs and event streams as part of your API ecosystem. It provides a range of capabilities to support async API management, including: + +- **Protocol Mediation**: Tyk Streams can mediate between different asynchronous protocols and API styles, such as WebSocket, Server-Sent Events (SSE), and Webhooks. This allows you to expose your event streams in a format compatible with your consumers' requirements. +- **Security**: Apply the same security policies and controls to your async APIs as you do to your synchronous APIs. This includes features like authentication and authorization. +- **Transformations**: Transform and enrich your event data on the fly using Tyk's powerful middleware and plugin system. This allows you to adapt your event streams to meet the needs of different consumers. +- **Analytics**: Monitor the usage and performance of your async APIs with detailed analytics and reporting. Gain insights into consumer behavior and system health. +- **Developer Portal**: Publish your async APIs to the Tyk Developer Portal, which provides a centralised catalog for discovery, documentation, and subscription management. + +--- +## Getting Started + +This guide will help you implement your first event-driven API with Tyk in under 15 minutes.
To illustrate the capabilities of Tyk Streams, let's consider an example: building a basic asynchronous chat application, nicknamed **Chat Jippity**. + +In this scenario, a user sends a message (e.g., asking for a joke) via a simple web interface and receives an asynchronous response generated by a backend service. + +This application flow demonstrates two key patterns enabled by Tyk Streams: acting as an **API Producer Gateway** and an **API Consumer Gateway**. + +```mermaid +sequenceDiagram + participant Browser + participant Tyk Gateway + participant Joker Service + + Note over Browser, Tyk Gateway: Consumer - SSE Connection Setup + Browser->>+Tyk Gateway: Make Server Side Events (SSE) Connection + Tyk Gateway-->>-Browser: Connection Established + + Note over Browser, discKafka: Producer - Message Flow for Request/Response + Browser->>+Tyk Gateway: (1) POST /chat (Request: "Tell me a joke") + Tyk Gateway->>+chatKafka: (2) Publish message to 'chat' topic + chatKafka-->>-Tyk Gateway: Ack (implied) + chatKafka-->>+Joker Service: (3) Consume message from 'chat' topic + Note over Joker Service: Processes request, gets joke + Joker Service->>+discKafka: (4) Publish response to 'discussion' topic + discKafka-->>-Joker Service: Ack (implied) + discKafka-->>+Tyk Gateway: (6) Consume/Receive message from 'discussion' topic + Tyk Gateway-->>-Browser: (7) Push joke response (via established WS/SSE connection) + Tyk Gateway-->>-discKafka: Ack (implied) +``` + +Let's break down how Tyk Streams facilitates this, focusing on the distinct producer and consumer roles Tyk plays: + +### Example Scenario + +#### Tyk as an API Producer Gateway (Client to Stream) + +* **Goal:** Allow a client (like a browser or a script) to easily send a message into our asynchronous system without needing direct access or knowledge of the backend message broker (Kafka in this case). +* **Scenario:** The user types "Tell me a joke" into the chat interface and hits send. +* **Flow:** + 1. 
The browser sends a standard HTTP `POST` request to an endpoint exposed by Tyk Gateway (e.g., `/chat`). + 2. **Tyk Streams Role (Producer):** Tyk Gateway receives this `POST` request. An API definition configured with Tyk Streams defines this endpoint as an *input*. Tyk takes the request payload and *publishes* it as a message onto a designated backend topic (e.g., the `chat` topic in Kafka). + 3. A backend service (our "Joker Service") listens to the `chat` topic for incoming requests. +* **Value Demonstrated:** + * **Protocol Bridging:** Tyk translates a synchronous HTTP POST into an asynchronous Kafka message. + * **Decoupling:** The browser only needs to know the Tyk HTTP endpoint, not the Kafka details (brokers, topic name, protocol). + * **API Management:** Tyk can enforce authentication, rate limits, etc., on the `/chat` endpoint before the message even enters the Kafka system. + +#### Tyk as an API Consumer Gateway (Stream to Client) + +* **Goal:** Deliver the asynchronous response (the joke) from the backend system to the client in real time. +* **Scenario:** The "Joker Service" has processed the request and generated a joke. It needs to send this back to the originating user's browser session. +* **Flow:** + 1. The Joker Service *publishes* the joke response as a message onto a different backend topic (e.g., the `discussion` topic in Kafka). + 2. **Tyk Streams Role (Consumer):** Tyk Gateway is configured via another (or the same) API definition to *subscribe* to the `discussion` topic. + 3. When Tyk receives a message from the `discussion` topic, it *pushes* the message content (the joke) to the appropriate client(s) (provided they have already established a connection) using a suitable real-time protocol like Server-Sent Events (SSE) or WebSockets. + **Note:** In case of multiple clients, events would round-robin amongst the consumers. 
+ +* **Value Demonstrated:** + * **Protocol Bridging:** Tyk translates Kafka messages into SSE messages suitable for web clients. + * **Decoupling:** The browser doesn't need a Kafka client; it uses standard web protocols (SSE/WS) provided by Tyk. The Joker Service only needs to publish to Kafka, unaware of the final client protocol. + +The following sections will guide you through the prerequisites and steps to configure Tyk Gateway to implement this use case. + +### Prerequisites + +- **Docker**: We will run the entire Tyk Stack on Docker. For installation, refer to this [guide](https://docs.docker.com/desktop/setup/install/mac-install/). +- **Git**: A CLI tool to work with git repositories. For installation, refer to this [guide](https://git-scm.com/downloads) +- **Dashboard License**: We will configure Streams API using Dashboard. [Contact our team](https://tyk.io/contact/) to obtain a license or get a self-managed trial license by completing the registration on our [website](https://tyk.io/sign-up/) +- **Curl and JQ**: These tools will be used for testing. + +### Instructions + +1. **Clone Git Repository:** + + The [tyk-demo](https://github.com/TykTechnologies/tyk-demo) repository offers a docker-compose environment you can run locally to explore Tyk streams. Open your terminal and clone the git repository using the command below. + + ```bash + git clone https://github.com/TykTechnologies/tyk-demo + cd tyk-demo + ``` + +2. **Enable Tyk Streams:** + + By default, Tyk Streams is disabled. To enable Tyk Streams in the Gateway and Dashboard, you need to configure the following settings: + + Create an `.env` file and populate it with the values below: + + ```bash + DASHBOARD_LICENCE= + GATEWAY_IMAGE_REPO=tyk-gateway-ee + TYK_DB_STREAMING_ENABLED=true + TYK_GW_STREAMING_ENABLED=true + ``` + + - `DASHBOARD_LICENCE`: Add your license key. Contact [our team](https://tyk.io/contact/) to obtain a license.
+ - `GATEWAY_IMAGE_REPO`: Tyk Streams is available as part of the Enterprise Edition of the Gateway. + - `TYK_DB_STREAMING_ENABLED` and `TYK_GW_STREAMING_ENABLED`: These must be set to `true` to enable Tyk Streams in the Dashboard and Gateway, respectively. Refer to the [configuration options](/tyk-oss-gateway/configuration#streaming) for more details. + +3. **Start Tyk Streams** + + Execute the following command: + ```bash + ./up.sh + ``` + + + + + This script also starts `Kafka` within a Docker container, which is necessary for this guide. + + + + This process will take a few minutes to complete and will display some credentials upon completion. Copy the Dashboard **username, password, and API key**, and save them for later use. + ``` + β–Ύ Tyk Demo Organisation + Username : admin-user@example.org + Password : 3LEsHO1jv1dt9Xgf + Dashboard API Key : 5ff97f66188e48646557ba7c25d8c601 + ``` + +4. **Verify Setup:** + + Open Tyk Dashboard in your browser by visiting [http://localhost:3000](http://localhost:3000) or [http://tyk-dashboard.localhost:3000](http://tyk-dashboard.localhost:3000) and login with the provided **admin** credentials. + +5. **Create Producer API:** + + Create a file `producer.json` with the below content: (**Note:** `tyk-demo-kafka-1` is the hostname used to access Kafka running in a container; alternatively, you can use the IP address assigned to your computer.) 
+ + + + + ```json + { + "components": {}, + "info": { + "title": "jippity-chat", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "servers": [ + { + "url": "http://tyk-gateway.localhost:8080/jippity-chat/" + } + ], + "x-tyk-api-gateway": { + "info": { + "name": "jippity-chat", + "state": { + "active": true + } + }, + "server": { + "listenPath": { + "value": "/jippity-chat/", + "strip": true + } + }, + "upstream": { + "url": "" + } + }, + "x-tyk-streaming": { + "streams": { + "default_stream": { + "input": { + "http_server": { + "address": "", + "allowed_verbs": [ + "POST" + ], + "path": "/chat", + "rate_limit": "", + "timeout": "5s" + }, + "label": "" + }, + "output": { + "kafka": { + "addresses": ["tyk-demo-kafka-1:9092"], + "max_in_flight": 10, + "topic": "chat" + }, + "label": "" + } + } + } + } + } + ``` + + + + + + Create the API by executing the following command. Be sure to replace `` with the API key you saved earlier: + + ```bash + curl -H "Authorization: " -H "Content-Type: application/vnd.tyk.streams.oas" http://localhost:3000/api/apis/streams -d @producer.json + ``` + + You should expect a response similar to the one shown below, indicating success. Note that the Meta and ID values will be different each time: + ```bash + {"Status":"OK","Message":"API created","Meta":"67e54cadbfa2f900013b501c","ID":"3ddcc8e1b1534d1d4336dc6b64a0d22f"} + ``` + +6. **Create Consumer API:** + + Create a file `consumer.json` with the below content: (**Note:** `tyk-demo-kafka-1` is the hostname used to access Kafka running in a container; alternatively, you can use the IP address assigned to your computer.)
+ + + + + ```json + { + "components": {}, + "info": { + "title": "jippity-discuss", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "servers": [ + { + "url": "http://tyk-gateway.localhost:8080/jippity-discuss/" + } + ], + "x-tyk-api-gateway": { + "info": { + "name": "jippity-discuss", + "state": { + "active": true + } + }, + "server": { + "listenPath": { + "value": "/jippity-discuss/", + "strip": true + } + }, + "upstream": { + "url": "" + } + }, + "x-tyk-streaming": { + "streams": { + "default_stream": { + "input": { + "kafka": { + "addresses": ["tyk-demo-kafka-1:9092"], + "auto_replay_nacks": true, + "checkpoint_limit": 1024, + "consumer_group": "tyk-streams", + "target_version": "3.3.0", + "topics": ["discussion"] + }, + "label": "" + }, + "output": { + "http_server": { + "address": "", + "allowed_verbs": [ + "GET" + ], + "stream_path": "/sse" + }, + "label": "" + } + } + } + } + } + ``` + + + + + + Create the API by executing the following command. Be sure to replace `` with the API key you saved earlier: + + ```bash + curl -H "Authorization: " -H "Content-Type: application/vnd.tyk.streams.oas" http://localhost:3000/api/apis/streams -d @consumer.json + ``` + + You should expect a response similar to the one shown below, indicating success. Note that the Meta and ID values will be different each time: + ```bash + {"Status":"OK","Message":"API created","Meta":"67e54cadbfa2f900013b501c","ID":"3ddcc8e1b1534d1d4336dc6b64a0d22f"} + ``` + +7. 
**Start Joker Service:** + + Create a file `joker-service.sh` with the below content: + + + + + ```bash + #!/bin/bash + + # Container name + CONTAINER="tyk-demo-kafka-1" + + # Kafka bootstrap server + BOOTSTRAP_SERVER="localhost:9092" + + # Topics + SOURCE_TOPIC="chat" + TARGET_TOPIC="discussion" + + # Kafka consumer and producer commands + CONSUMER_CMD="/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --topic $SOURCE_TOPIC" + PRODUCER_CMD="/opt/kafka/bin/kafka-console-producer.sh --broker-list $BOOTSTRAP_SERVER --topic $TARGET_TOPIC" + + # Joke API URL + JOKE_API="https://icanhazdadjoke.com/" + + echo "Starting to listen for messages on '$SOURCE_TOPIC'..." + + # Run the consumer in the container, pipe output to a while loop + docker exec -i $CONTAINER bash -c "$CONSUMER_CMD" | while IFS= read -r message; do + # Skip empty lines + [ -z "$message" ] && continue + + echo "Received message from $SOURCE_TOPIC: $message" + + # Fetch a random joke from the API and extract it with jq + joke=$(curl -s -H "Accept: application/json" "$JOKE_API" | jq .joke) + + # Check if joke was fetched successfully + if [ -n "$joke" ]; then + response_message="In response to '$message': Here's a dad joke - $joke" + else + response_message="In response to '$message': Couldn't fetch a joke, sorry!" + fi + + # Send the response message to 'discussion' topic + echo "$response_message" | docker exec -i $CONTAINER bash -c "$PRODUCER_CMD" + + echo "Posted to $TARGET_TOPIC: $response_message" + done + + echo "Consumer stopped." + ``` + + + + + + Make the file executable and start the service. + + ```bash + chmod +x joker-service.sh + ./joker-service.sh + ``` + +8. 
**Test the API:** + + Open a terminal and execute the following command to start listening for messages from the Consumer API you created: + + ```bash + curl -N http://tyk-gateway.localhost:8080/jippity-discuss/sse + ``` + + In a second terminal, execute the command below to send a message to the Producer API. You can run this command multiple times and modify the message to send different messages: + + ```bash + curl -X POST http://tyk-gateway.localhost:8080/jippity-chat/chat -H "Content-Type: text/plain" -d "Tell me a joke." + ``` + + Now, you will see the message appear in the terminal window where you are listening for messages. + +**Wrapping Up:** And that’s itβ€”you’ve just created an Async API with Tyk Streams! From here, you can tweak the configuration to suit your needs, [explore glossary](#glossary), or explore more [advanced use cases](#use-cases). + +--- +## How It Works + +Tyk Streams seamlessly integrates with the Tyk API Gateway, extending its capabilities beyond traditional synchronous request/response patterns to natively support asynchronous APIs and event-driven architectures. + +This section details the architecture, components, and request processing flow of Tyk Streams. + +### High-Level Architecture + +At a high level, Tyk Streams operates within the Tyk ecosystem, interacting with several key elements: + +* **Tyk API Gateway**: The core API management platform. It is the entry point, handling initial request processing (like authentication and rate limiting) and routing requests. +* **Tyk Streams Module**: An integrated extension within the Gateway designed explicitly for asynchronous communication. It intercepts relevant requests and manages the streaming logic. +* **Event Brokers / Sources**: External systems that act as the origin or destination for data streams. Examples include Apache Kafka, NATS, MQTT brokers, or WebSocket servers. Tyk Streams connects to these systems based on API configuration. 
+* **Upstream Services / APIs**: The backend systems, microservices, or APIs that ultimately produce or consume the data being streamed or processed via Tyk Streams. + +Think of the Tyk Gateway as the central dispatch for all API traffic. When traffic requires asynchronous handling (like pushing data to Kafka or subscribing to an MQTT topic), the integrated Tyk Streams module manages the interaction with the specific Event Broker and Upstream Service according to the API's configuration. + +### Internal Components of Tyk Streams + +To manage these asynchronous interactions, the Tyk Streams module relies on several internal components operating within the Gateway: + +1. **Stream Middleware**: This component plugs into the Tyk Gateway's request processing chain. It runs *after* standard middleware like authentication and rate limiting but *before* the request would normally be proxied. Its job is to inspect incoming requests, identify if they match a configured stream path, and if so, divert them from the standard proxy flow into the stream handling logic. +2. **Stream Manager**: Acts as the supervisor for streaming operations defined in an API. For a given stream configuration, it is responsible for initializing, managing the lifecycle (starting/stopping), and coordinating the necessary `Stream Instances`. It ensures the correct streaming infrastructure is ready based on the API definition. +3. **Stream Instance**: Represents a running, active instance of a specific stream processing task. Each instance executes the logic defined in its configuration – connecting to an event broker, processing messages, transforming data, handling connections, etc. There can be multiple instances depending on the configuration and workload. +4. **Stream Analytics**: This component captures connection attempts and errors related to HTTP outputs. This data can be exported to popular analytics platforms like Prometheus, OpenTelemetry, and StatsD.
+ +The following diagram shows the relationships and primary interactions between these internal components and how they relate to the Gateway and Upstream API: + +```mermaid +graph TD + idClient[Client] + idTykGateway[Tyk Gateway] + idStreamMiddleware[Stream Middleware] + idStreamManager[Stream Manager] + idStreamAnalytics[Stream Analytics] + idStreamInstance[Stream Instance] + idUpstreamAPI[Upstream API] + + idClient -- Request --> idTykGateway + idTykGateway -- Response --> idClient + idTykGateway -- Process Request --> idStreamMiddleware + idStreamMiddleware -- Response --> idTykGateway + idStreamMiddleware -- Configure & Manage --> idStreamManager + idStreamMiddleware -- Capture Analytics --> idStreamAnalytics + idStreamManager -- Create & Run --> idStreamInstance + idStreamInstance -- Processed Response --> idStreamMiddleware + idStreamInstance -- Process Data --> idUpstreamAPI + idUpstreamAPI -- Response --> idStreamInstance +``` + +### Request Processing Flow + +Understanding how these components work together is key. 
Here’s the typical flow when a request interacts with a Tyk Streams-enabled API endpoint: + +```mermaid +sequenceDiagram + participant Client + participant TykGateway as Tyk Gateway + participant StreamingMiddleware as Streaming Middleware + participant StreamManager as Stream Manager + participant StreamInstance as Stream Instance + participant UpstreamService as Upstream Service + + Client->>TykGateway: HTTP Request + TykGateway->>StreamingMiddleware: Process Request + StreamingMiddleware->>StreamingMiddleware: Strip Listen Path + StreamingMiddleware->>StreamingMiddleware: Check if path is handled by streams + + alt Path handled by streams + StreamingMiddleware->>StreamManager: Create/Get Stream Manager for request + StreamingMiddleware->>StreamManager: Match request to route + StreamManager->>StreamInstance: Handle request + StreamInstance->>Client: Stream response + else Not handled by streams + StreamingMiddleware-->>TykGateway: Continue middleware chain + TykGateway->>UpstreamService: Proxy request + UpstreamService->>TykGateway: Response + end + + TykGateway->>Client: HTTP Response +``` + +1. **Request Arrival & Gateway Pre-processing**: A client sends a request to an API endpoint managed by Tyk Gateway. The request passes through the initial middleware, such as authentication, key validation, and rate limiting. +2. **Streaming Middleware Interception**: The request reaches the `Stream Middleware`. It checks the request path against the stream routes defined in the API configuration. +3. **Path Matching**: + * **If No Match**: The `Stream Middleware` will respond with a `404 Not Found` status code. + * **If Match**: The request is intended for a stream. The `Stream Middleware` takes control of the request handling. +4. **Stream Manager Coordination**: The middleware interacts with the `Stream Manager` associated with the API's stream configuration. 
The `Stream Manager` ensures the required `Stream Instance`(s) are initialized and running based on the loaded configuration. This might involve creating a new instance or reusing a cached one. +5. **Stream Instance Execution**: The instance then executes its defined logic, interacting with the configured `Upstream Service / Event Broker` (e.g., publishing a message to Kafka, subscribing to an MQTT topic, forwarding data over a WebSocket). +6. **Analytics Capture**: The `Stream Analytics` component captures relevant metrics throughout the stream handling process. +7. **Final Gateway Response**: The response or data stream generated by the streaming components is relayed back through the Gateway to the originating client. + +### Scaling and Availability + +The beauty of Tyk Streams is that it’s baked into the Tyk Gateway, so it scales naturally as your API traffic ramps up—no extra setup or separate systems required. It’s efficient too, reusing the same resources as the Gateway to keep things lean. + +--- +## Configuration Options + +Configuring Tyk Streams involves two distinct levels: + +1. **System-Level Configuration:** Enabling the Streams functionality globally within your Tyk Gateway and Tyk Dashboard instances. This activates the necessary components but doesn't define any specific streams. +2. **API-Level Configuration:** Defining the actual stream behaviors (inputs, outputs, processing logic) within a specific Tyk OAS API Definition using the `x-tyk-streaming` extension. This is where you specify *how* data flows for a particular asynchronous API. + +Let's look at each level in detail. + +### System-Level Configuration + +Before you can define streams in your APIs, you must enable the core Streams feature in both the Tyk Gateway and, if you're using it for management, the Tyk Dashboard.
+ +#### Tyk Gateway + +Enable the Streams processing engine within the Gateway by setting `enabled` to `true` in the `streaming` section of your `tyk.conf` file or via environment variables. + + + +```json +{ +// Partial config from tyk.conf + "streaming": { + "enabled": true // Required to activate Streams functionality + }, +// ... more config follows +} +``` + + +```bash +export TYK_GW_STREAMING_ENABLED=true +``` + + + +Refer to the [Tyk Gateway Configuration Reference](/tyk-oss-gateway/configuration#streamingenabled) for more details on this setting. + +#### Tyk Dashboard + +If you manage your APIs via the Tyk Dashboard, you must also enable Streams support within the Dashboard configuration (`tyk_analytics.conf`) to expose Streams-related UI elements and functionality. + + + +```json +{ +// Partial config from tyk_analytics.conf + "streaming": { + "enabled": true // Required to activate Streams functionality + }, +// ... more config follows +} +``` + + +```bash +export TYK_DB_STREAMING_ENABLED=true +``` + + + +Refer to the [Tyk Dashboard Configuration Reference](/tyk-dashboard/configuration#streamingenabled) for more details. + +### API-Level Configuration + +Once Streams is enabled at the system level, you define the specific behavior for each asynchronous API within its Tyk Open API Specification (OAS) definition. This is done using the `x-tyk-streaming` vendor extension. + +The core structure under `x-tyk-streaming` is the `streams` object, which contains one or more named stream configurations. Each named stream defines: + +* **`input`**: Specifies how data enters this stream (e.g., via an HTTP request, by consuming from Kafka, connecting via WebSocket). +* **`output`**: Specifies where the data goes after processing (e.g., published to Kafka, sent over WebSocket, delivered via webhook). 
+ +```json +{ +// Partial config from Tyk OAS API Definition + "x-tyk-streaming": { + "streams": { + "your_stream_name": { // A unique name for this stream configuration within the API + "input": { + // Input configuration object - specifies the data source + // Example: "http_server": { ... } or "kafka": { ... } + }, + "output": { + // Output configuration object - specifies the data destination + // Example: "kafka": { ... } or "websocket_server": { ... } + } + // Optional processing/transformation steps can also be defined here + }, + "another_stream": { // You can define multiple independent streams + "input": { ... }, + "output": { ... } + } + } + }, +// ... more config follows +} +``` + +**Available Input and Output Types:** + +Tyk supports various connector types for both `input` and `output`. **The specific types available (like `http_server`, `kafka`, `http_client`, etc.) and their respective configuration parameters are detailed in the [Tyk Streams Configuration Reference](/api-management/stream-config).** Please consult this reference page for the full list of options and how to configure each one. + +**Example Configuration:** + + + + +```json +{ +// Partial config from Tyk OAS API Definition + "x-tyk-streaming": { + "streams": { + "http_to_kafka_chat": { + "input": { + "http_server": { + "path": "/chat", + "allowed_verbs": [ "POST" ], + + }, + "label": "HTTP Chat Input" + }, + "output": { + "kafka": { + "addresses": ["kafka-broker:9092"], + "topic": "chat", + + }, + "label": "Kafka Chat Output" + } + } + } + }, +// ... more config follows +} +``` + +For comprehensive details on all fields within `x-tyk-streaming`, see the [Tyk OAS Extension documentation](/api-management/gateway-config-tyk-oas#xtykstreaming). + + + + + +The Tyk Dashboard provides a wizard to create Streams APIs, which generates the underlying Tyk OAS configuration shown above. + +1. Navigate to **APIs > Add New API**. +2. Select the **Streams** API type and give your API a name. 
Click **Configure API**. + Streams Option +3. In the **API Designer**, under the **Streams** tab, configure your desired `Input` and `Output`. Select the types (e.g., HTTP Server for input, Kafka for output) and fill in the required parameters based on the [Streams Configuration Reference](/api-management/stream-config). + Input/Output Selection +4. Configure any other API settings (e.g., Authentication, Rate Limiting) as needed in the other tabs. +5. **Save** the API. The Dashboard translates your UI configuration into the `x-tyk-streaming` JSON structure within the API definition. You can view the generated JSON in the **Advanced Options** tab under **API Definition**. + + + + + +Tyk Streams configuration (`x-tyk-streaming`) is **only supported** within **Tyk OAS API Definitions**. It is not available for legacy Tyk Classic API Definitions. + + + + + +### Supported Connectors and Protocols + +Tyk Streams provides out-of-the-box connectors for popular event brokers and async protocols, including: + +- [Apache Kafka](https://kafka.apache.org/documentation/) +- [WebSocket](https://websocket.org/guides/websocket-protocol/) +- [Server-Sent Events](https://en.wikipedia.org/wiki/Server-sent_events) (SSE) +- [Webhooks](https://en.wikipedia.org/wiki/Webhook) + + +--- +## Use Cases + +Tyk Streams brings full lifecycle API management to asynchronous APIs and event-driven architectures. It provides a +comprehensive set of capabilities to secure, transform, monitor and monetize your async APIs. + +### Security + +[Tyk Streams](/api-management/event-driven-apis#) supports all the authentication and authorization options available for traditional synchronous APIs. This +ensures that your async APIs are protected with the same level of security as your REST, GraphQL, and other API types. + +Refer this docs, to know more about [Authentication](/api-management/client-authentication) and [Authorization](/api-management/policies) in Tyk. 
+ +### Transformations and Enrichment + +[Tyk Streams](/api-management/event-driven-apis#) allows you to transform and enrich the messages flowing through your async APIs. You can modify message payloads, filter events, combine data from multiple sources and more. + +- **[Transformation](/api-management/traffic-transformation)**: Use Tyk's powerful middleware and plugin system to transform message payloads on the fly. You can convert between different data formats (e.g., JSON to XML), filter fields, or apply custom logic. +- **[Enrichment](/api-management/plugins/overview)**: Enrich your async API messages with additional data from external sources. For example, you can lookup customer information from a database and append it to the message payload. + +### Monetization + +[Tyk Streams](/api-management/event-driven-apis#) enables you to monetize your async APIs by exposing them through the Developer Portal. Developers can discover, subscribe to and consume your async APIs using webhooks or streaming subscriptions. + +- **Developer Portal Integration**: Async APIs can be published to the Tyk Developer Portal, allowing developers to browse, subscribe, and access documentation. Developers can manage their async API subscriptions just like traditional APIs. +- **Webhooks**: Tyk supports exposing async APIs as webhooks, enabling developers to receive event notifications via HTTP callbacks. Developers can configure their webhook endpoints and subscribe to specific events or topics. + +### Complex Event Processing + +Tyk Streams allows you to perform complex event processing on streams of events in real-time. 
You can define custom processing logic to: + +- Filter events based on specific criteria +- Aggregate and correlate events from multiple streams +- Enrich events with additional data from other sources +- Detect patterns and sequences of events +- Trigger actions or notifications based on event conditions + +Here's an example of a Tyk Streams configuration that performs complex event processing, specifically it creates a new event stream, which filters high-value orders and enriches them with customer email addresses, by making an additional HTTP request. + +```yaml +input: + kafka: + addresses: + - "localhost:9092" # Replace with actual Kafka broker addresses + consumer_group: my-group + topics: + - orders +output: + http_server: + allowed_verbs: + - GET + path: /high-value-orders +pipeline: + processors: + - mapping: | + root = if this.order_value > 1000 { + this + } else { + deleted() + } + - branch: + processors: + - http: + headers: + Content-Type: application/json + url: http://customer-api.local/emails + verb: POST + request_map: |- + root = { + "customer_id": this.customer_id + } + result_map: root.customer_email = this.customer_email + - mapping: | + root = this.merge({ "high_value_order": true }) +``` + +In this example: + +- **Tyk Streams Setup**: Consumes events from a Kafka topic called *orders*. +- **Processor Block Configuration**: Utilizes a custom `Mapping` script that performs the following operations: + - **Filters** orders, only processing those with a value greater than 1000. + - **Enriches** the high-value orders by retrieving the customer ID and email from a separate data source. + - **Adds** a new high_value_order flag to each qualifying event. +- **Output Handling**: Processed high-value order events are exposed via a WebSocket stream at the endpoint */high-value-orders*. + +### Legacy Modernization + +Tyk Streams can help you modernise legacy applications and systems by exposing their functionality as async APIs. 
This allows you to: +- Decouple legacy systems from modern consumers +- Enable real-time, event-driven communication with legacy apps +- Gradually migrate away from legacy infrastructure + +Here's an example of exposing a legacy application as an async API using Tyk Streams: + +```yaml +input: + http_client: + url: "http://legacy-app/orders" + verb: GET + rate_limit: "60s" +pipeline: + processors: + - mapping: | + root.order_id = this.id + root.total = this.total + root.timestamp = this.timestamp +output: + kafka: + addresses: ["localhost:9092"] + topic: "orders" +``` + +In this configuration: +- Tyk Streams periodically polls the legacy */orders* REST endpoint every 60 seconds +- The *processor* transforms the legacy response format into a simplified event structure +- The transformed events are published to a Kafka topic called *orders*, which can be consumed by modern applications + +### Async API Orchestration + +Tyk Streams enables you to orchestrate multiple async APIs and services into composite event-driven flows. You can: +- Combine events from various streams and sources +- Implement complex routing and mediation logic between async APIs +- Create reactive flows triggered by event conditions +- Fanout events to multiple downstream consumers + +Here's an example async API orchestration with Tyk Streams: + +```yaml +input: + broker: + inputs: + - kafka: + addresses: ["localhost:9092"] + topics: ["stream1"] + consumer_group: "group1" + - kafka: + addresses: ["localhost:9092"] + topics: ["stream2"] + consumer_group: "group2" +pipeline: + processors: + - switch: + cases: + - check: 'meta("kafka_topic") == "stream1"' + processors: + - mapping: | + root.type = "event_from_stream1" + root.details = this + - branch: + processors: + - http: + url: "http://api1.example.com/process" + verb: POST + body: '${! 
json() }' + result_map: 'root.api1_response = this' + - check: 'meta("kafka_topic") == "stream2"' + processors: + - mapping: | + root.type = "event_from_stream2" + root.details = this + - branch: + processors: + - http: + url: "http://api2.example.com/analyze" + verb: POST + body: '${! json() }' + result_map: 'root.api2_response = this' + - mapping: 'root = if this.type == "event_from_stream1" && this.api1_response.status == "ok" { this } else if this.type == "event_from_stream2" && this.api2_response.status == "ok" { this } else { deleted() }' +output: + broker: + pattern: "fan_out" + outputs: + - kafka: + addresses: ["localhost:9092"] + topic: "processed_stream1" + client_id: "tyk_fanout1" + - kafka: + addresses: ["localhost:9092"] + topic: "processed_stream2" + client_id: "tyk_fanout2" + - http_client: + url: "https://webhook.site/unique-id" + verb: POST + body: '${! json() }' +``` + +1. **Input Configuration** + - Uses a broker to combine events from two different Kafka topics, stream1 and stream2, allowing for the integration of events from various streams. +2. **Complex Routing and Processing** + - A switch processor directs messages based on their origin (differentiated by Kafka topic metadata). + - Each stream’s messages are processed and conditionally sent to different APIs. + - Responses from these APIs are captured and used to decide on message processing further. +3. **Reactive Flows** + - Conditions based on API responses determine if messages are forwarded or discarded, creating a flow reactive to the content and success of API interactions. + - Fanout to Multiple Consumers: + - The broker output with a fan-out pattern sends processed messages to multiple destinations: two different Kafka topics and an HTTP endpoint, demonstrating the capability to distribute events to various downstream consumers. + +These are just a few examples of the advanced async API scenarios made possible with Tyk Streams. 
The platform provides a flexible and extensible framework to design, deploy and manage sophisticated event-driven architectures. + +### Monetize APIs using Developer Portal + +Tyk Streams seamlessly integrates with the Tyk Developer Portal, enabling developers to easily discover, subscribe to, and consume async APIs and event streams. This section covers how to publish async APIs to the developer portal, provide documentation, and enable developers to subscribe to events and streams. + + + +#### Publishing Async APIs to the Developer Portal + +Publishing async APIs to the Tyk Developer Portal follows a similar process to publishing traditional synchronous APIs. API publishers can create API products that include async APIs and make them available to developers through the portal. + +To publish an async API: +- In the Tyk Dashboard, create a new API and define the async API endpoints and configuration. +- Associate the async API with an API product. +- Publish the API product to the Developer Portal. + +{/* [Placeholder for screenshot or GIF demonstrating the process of publishing an async API to the Developer Portal] */} + + + +#### Async API Documentation + +Providing clear and comprehensive documentation is crucial for developers to understand and effectively use async APIs. While Tyk Streams does not currently support the AsyncAPI specification format, it allows API publishers to include detailed documentation for each async API.
+ +When publishing an async API to the Developer Portal, consider including the following information in the documentation: +- Overview and purpose of the async API +- Supported protocols and endpoints (e.g., WebSocket, Webhook) +- Event types and payloads +- Subscription and connection details +- Example code snippets for consuming the async API +- Error handling and troubleshooting guidelines + +{/* [Placeholder for screenshot showcasing async API documentation in the Developer Portal] */} + + + +#### Enabling Developers to Subscribe to Events and Streams + +Tyk Streams provides a seamless way for developers to subscribe to events and streams directly from the Developer Portal. API publishers can enable webhook subscriptions for specific API products, allowing developers to receive real-time updates and notifications. +To enable webhook subscriptions for an API product: +1. In the Tyk Developer Portal, navigate to the API product settings. +2. Enable the "Webhooks" option and specify the available events for subscription. +3. Save the API product settings. + +Enable Portal Webhooks + +{/* [Placeholder for screenshot showing the API product settings with webhook configuration] */} + +Once webhook subscriptions are enabled, developers can subscribe to events and streams by following these steps: +- In the Developer Portal, navigate to the My Apps page. +- Select the desired app. +- In the "Webhooks" section, click on "Subscribe". +- Provide the necessary details: + - *Webhook URL*: The URL where the event notifications will be sent. + - *HMAC Secret*: Provide a secret key used to sign the webhook messages for authentication. + - *Events*: Select the specific events to subscribe to. +- Save the subscription settings. 
+ +{/* [Placeholder for screenshot illustrating the developer's view of subscribing to webhooks] */} + +subscribe to webhooks from portal + +To configure the async API stream for webhook subscriptions, use the following output configuration in your API definition: + +```yaml +outputs: + - portal_webhook: + event_type: bar + portal_url: http://localhost:3001 + secret: <secret> +``` + +Replace `<secret>` with the secret key for signing the webhook messages. + +Enabling webhook subscriptions allows developers to easily integrate real-time updates and notifications from async APIs into their applications, enhancing the overall developer experience and facilitating seamless communication between systems. +{/* [Placeholder for a diagram illustrating the flow of webhook subscriptions and event notifications] */} + +With Tyk Streams and the Developer Portal integration, API publishers can effectively manage and expose async APIs, while developers can discover, subscribe to, and consume event streams effortlessly, enabling powerful real-time functionality in their applications. + + +--- +## Glossary + +### Event + +An event represents a significant change or occurrence within a system, such as a user action, a sensor reading, or a data update. Events are typically lightweight and contain minimal data, often just a unique identifier and a timestamp. + +### Stream + +A stream is a continuous flow of events ordered by time. Streams allow for efficient, real-time processing and distribution of events to multiple consumers. + +### Publisher (or Producer) + +A publisher is an application or system that generates events and sends them to a broker or event store for distribution to interested parties. + +### Subscriber (or Consumer) + +A subscriber is an application or system that expresses interest in receiving events from one or more streams. Subscribers can process events in real-time or store them for later consumption.
+ +### Broker + +A broker is an intermediary system that receives events from publishers, stores them, and forwards them to subscribers. Brokers decouple publishers from subscribers, allowing for scalable and flexible event-driven architectures. + +### Topic (or Channel) + +A topic is a named destination within a broker where events are published. Subscribers can subscribe to specific topics to receive relevant events. + +--- +## FAQ + + + +Tyk Streams is an extension to the Tyk API Gateway that supports asynchronous APIs and event-driven architectures. It solves the challenge of managing both synchronous and asynchronous APIs in a unified platform, allowing organizations to handle real-time event streams alongside traditional REST APIs. + + + +Refer this [documentation](#supported-connectors-and-protocols). + + + +Currently, Tyk Streams is only available for hybrid customers on Tyk Cloud. To enable it, [contact support](https://tyk.io/contact/). + + + +Yes, as of Tyk v5.7.0, you can publish Tyk Streams APIs to the Tyk Developer Portal. The process is similar to publishing traditional APIs: create a Tyk Streams API, create a Policy to protect it, and publish it to the Developer Portal Catalog. + + + +Tyk Streams is embedded within the Tyk Gateway and scales with your existing Tyk infrastructure. No additional infrastructure is required. + + + +Tyk Streams is available exclusively in the `enterprise` edition. Currently, it is only accessible for hybrid customers using Tyk Cloud. Please refer to the latest documentation or reach out to [Tyk support](https://tyk.io/contact/) for specific availability in your edition. 
+ + diff --git a/api-management/external-service-integration.mdx b/api-management/external-service-integration.mdx new file mode 100644 index 000000000..d3e7f2474 --- /dev/null +++ b/api-management/external-service-integration.mdx @@ -0,0 +1,305 @@ +--- +title: "Tyk Identity Broker - Integrate Social Logins, IDPs, LDAP and Custom Authentication" +description: "Learn how to integrate external services with Tyk API Gateway. Discover how to use middleware plugins, webhooks, and service discovery to extend your API functionality and connect with third-party systems." +keywords: "Tyk Identity Broker, TIB, Identity Provider, Identity Handler, SSO, Custom Authentication, Custom Proxy Provder, SAML, OIDC, OpenID Connect, Profies, IDPs, Social Provider, LDAP" +sidebarTitle: "Tyk Identity Broker (TIB)" +--- + +## Introduction + +Tyk Identity Broker (TIB) is a solution for integrating various **Identity Management Systems (such as LDAP, Social OAuth, Okta)** with your Tyk installation. + +With TIB, you gain the flexibility to connect your existing user directories to Tyk Dashboard or Developer Portal, streamlining access management and enhancing security. Whether you're looking to implement SSO, leverage social logins, or integrate with enterprise identity providers, TIB provides the tools and configurations to make it happen. + +This page introduces general features of Tyk Identity Broker (TIB) and how to configure them. If you are looking for global configurations of the TIB deployment refer this [config file](/tyk-configuration-reference/tyk-identity-broker-configuration). + +## What is Tyk Identity Broker (TIB)? + +Tyk Identity Broker (TIB) is a component providing a bridge between various Identity Management Systems such as LDAP, Social OAuth (e.g. GPlus, Twitter, GitHub) or Basic Authentication providers, to your Tyk installation. 
+ +TIB can act as a bridge between the API Gateway, Tyk Portal or even the Tyk Dashboard, and makes it easy to integrate custom IDMs to your system. + +Starting from Tyk v3.0 TIB has been added as a built-in feature of the Tyk Dashboard. You no longer have to setup a separated instance of the service to make it work with the Dashboard. You now have two options: +1. Internal TIB: Embedded in dashboard. Easy configuration and set up. Share the same port as the dashboard +2. External TIB: Installation of TIB as a different component for advanced use cases. Requires changes to the config files and separate port. + + +**What can you do with the Tyk Identity Broker (TIB)?** + +By using the identity broker in conjunction with an IDP you have the ability to perform actions such as: + +- Enabling easy access via social logins to the developer portal (e.g. GitHub login) +- Enabling internal access to the dashboard (e.g. via LDAP/ActiveDirectory) +- Enabling easy token generation from a third party for things such as mobile apps and webapps without complex configuration + +## Working of Tyk Identity Broker (TIB) + +TIB provides a simple API through which traffic can be sent. The API will match the request to a profile which then exposes two things: + +1. An **Identity Provider** that will authorize a user and validate their identity +2. An **Identity Handler** that will authenticate a user with a delegated service (in this case, Tyk) + +### Identity Providers + +Identity providers can be anything, as long as they implement the `tap.TAProvider` interface. Bundled with TIB at the moment you have four provider types: + +1. **Social** - this provides OAuth handlers for many popular social logins (such as Google, Github and Bitbucket) +2. **LDAP** - a simple LDAP protocol binder that can validate a username and password against an LDAP server (tested against OpenLDAP) +3. 
**Proxy** - a generic proxy handler that will forward a request to a third party and provides multiple "validators" to identify whether a response is successful or not (e.g. status code, content match and regex) +4. **SAML** - provides a way to authenticate against a SAML IDP. + +### Identity Handlers + +An identity handler will perform a predefined set of actions once a provider has validated an identity. These actions are defined as a set of action types: + +1. `GenerateOrLoginUserProfile` - this will log a user into the Tyk Dashboard (this does not create a user, it only creates a temporary session for the user to have access). This flow is defined as follows: + +Generate Or Login User Profile flow + +2. `GenerateOrLoginDeveloperProfile` - this will create or log in a user to the Tyk Developer Portal. The flow is similar to _GenerateOrLoginUserProfile_ but in this case if the developer doesn't exist then it will be created. + +3. `GenerateOAuthTokenForClient` - this will act as a client ID delegate and grant a Tyk-provided OAuth token for a user using a fragment in the redirect URL (standard flow). The flow is defined as: + +Generate Oauth token for client + +### Exploring TIB Profiles + +TIB takes as input one or many profiles that are stored in mongo or a file (it depends on the type of installation); a profile is a configuration that outlines how to match an identity provider with a handler and what action to perform (Example: enable Dashboard SSO using OpenID and Microsoft Azure as IDP). The Dashboard adds a user interface to manage the profiles. + +Identity Broker User Interface + +### Anatomy of a Profile +Each profile is outlined by a series of attributes that will describe: action to perform, IDP to connect, URLs to redirect to on success and failure, etc.
+In order to know and understand each of the attributes, implications as well as configure your own profile please consult the profile structure below: + +#### Fields that are common for all the providers + +| Field | Description | Required | +| :------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :---------- | +| ID | ID of the profile, is a string, use the name of the profile +| OrgID | Organization ID | Yes | +| ActionType | Which action is expected to be executed while using this profile, valid values are:
  • `GenerateOrLoginDeveloperProfile`: SSO portal
  • `GenerateOrLoginUserProfile`: SSO dashboard
  • `GenerateOAuthTokenForClient`: generate OAuth tokens
| Yes | +| Type | Valid values are:
  • `passthrough`: for LDAP and ProxyProvider
  • `redirect`: for SAML and Social
| Yes | +| CustomEmailField | Name of the claim associated with the email value stored in the IDP (Identity Provider). | No | +| CustomUserIDField | Name of the claim associated with the User ID value stored in the IDP (Identity Provider). | No | +| IdentityHandlerConfig.DashboardCredential | API Key that will be used to consume the dashboard API to issue nonce codes and validate user data | yes | +| ReturnURL | Where to redirect and send the claims from the IDP on login. For dashboard SSO it would be `http://dashboard-host/tap`. For classic portal SSO it would be `http://{portal-host}/sso` | yes | +| DefaultUserGroup | When mapping groups, if a group is not found, specify which group to fallback to. | No | +| CustomUserGroupField | Name of the claim associated with the Group ID values stored in the Identity Provider | No | +| UserGroupMapping | Map that contains the matching between Tyk groups and IDP group. | No | +| UserGroupSeparator | The IDP might send the groups to which the user belongs to as a single string separated by any symbol or empty spaces, with this field you can set which symbol to use to split as an array | No | +| SSOOnlyForRegisteredUsers | A boolean value to restrict the SSO only to users that already exists in the database. Users that do not exist in the database and successfully logins in the IDP will not have access to tyk | No | + + +#### LDAP profile fields + +| Field | Description | Required | +| :------------------------ | :--------------------------------------------------------------------------------------------------------------------------------- | :------------------------------- | +| LDAPUseSSL | Whether to connect with the LDAP server via TLS, e.g. *true* or *false* | No | +| LDAPServer | LDAP Server address, e.g. *ldap://hostname*. | Yes | +| LDAPPort | LDAP Port, e.g. *389* or *636*. 
| Yes | +| LDAPUserDN | Required to uniquely identify and locate a user's entry in the LDAP directory | Yes | +| LDAPBaseDN | Distinguished Name from where the search will start | No | +| LDAPFilter | Used for filtering in the LDAP server | No | +| LDAPEmailAttribute | The name of the field in the LDAP schema that represents the user's email. Defaults to *mail*. | No | +| LDAPFirstNameAttribute | The name of the field in the LDAP schema that represents the user's first name. Defaults to *givenName* | No | +| LDAPLastNameAttribute | The name of the field in the LDAP schema that represents the user's last name. Defaults to *sn*. | No | +| LDAPAdminUser | Admin user name | No | +| LDAPAdminPassword | Admin password | No | +| LDAPAttributes | List of attributes to return when a matching LDAP record is found, for example ['cn', 'mail', 'ou'] | Yes. It can be an empty list | +| LDAPSearchScope | The scope is an integer value that determines the depth of the search in the directory hierarchy | No | +| FailureRedirect | In the event of a login failure this is the URL that the user will be redirected to. | Yes | +| DefaultDomain | Domain in which the LDAP is running. Used to build the username but not to perform the requests. | No | +| GetAuthFromBAHeader | A boolean value that, when set to *true*, instructs TIB to gather the user and password from the Authorization header when handling the request. | No | +| SlugifyUserName | When set to *true* enhance the username so that is URL friendly. 
| No | + +#### ProxyProvider profile fields + +| Field | Description | Required | +| :------------------------------------ | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :---------------------------------------------- | +| TargetHost | URL of the server | Yes | +| OKCode | This is an integer represents the HTTP status code that represents a successful response from the target service. If the response code matches this value the identity broker treats it as a successful interaction. | No. But one of OKCode, OKResponse, or OKRegex should be filled | +| OKResponse | This field specifies a particular string that should match with the response body to be considered successful. | No. But one of OKCode, OKResponse, or OKRegex should be filled | +| OKRegex | Is used to validate the content of the response beyond just the HTTP status code. If the response body contains data that matches this regular expression, it is considered a successful response. | No. But one of OKCode, OKResponse, or OKRegex should be filled | +| ResponseIsJson | This parameter helps the identity broker understand how to interpret the response body from the target service. If ResponseIsJson is set to true, the broker will expect the response to be in JSON format and will process it accordingly. This includes parsing JSON data to extract relevant information. This is a boolean field. | No | +| AccessTokenField | The name of the field that contains the access token. | No | +| UsernameField | The name of the field that contains the username. 
| No | +| ExrtactUserNameFromBasicAuthHeader | A boolean value that, when set to true, instructs TIB to gather the user and password from the Authorization header when handling the request. | No | +#### Social profile fields + +| Field | Description | Required | +| :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------ | +| CallbackBaseURL | URL to be redirected on success login | Yes | +| FailureRedirect | URL to be redirected on failure | Yes | +| UseProviders.Name | Name of the provider to be used. Valid values: `gplus`, `github`, `twitter`, `linkedin`, `dropbox`, `digitalocean`, `bitbucket`, `salesforce`, `openid-connect` | Yes | +| UseProviders.Key | Oauth Client key | yes | +| UseProviders.Secret | Oauth Client Secret | yes | +| UseProviders.DiscoverURL | used to dynamically retrieve the OpenID Provider's configuration metadata, including endpoints and supported features, in JSON format from /.well-known/openid-configuration. | Only required when using openid-connect | +| UseProviders.Scopes | Specifies the level of access or permissions a client is requesting from the user and the authorization server, for example ["openid","email"]. | No, however when using openID the scope β€˜openid’ should be added | +| UseProviders.SkipUserInfoRequest | Determines whether to bypass the *UserInfo* endpoint request, improving performance by relying on the ID token alone for user details. | No | +| JWE.Enabled | When set to true, JWE will be enabled, allowing Tyk to decrypt the ID token received from the IdP. If set to false, the ID token will not be decrypted. 
| No | +| JWE.PrivateKeyLocation | Specifies the path or identifier (certid) for the certificate that contains the private key used to decrypt the ID token when JWE is enabled. This certificate must be in PEM format and include both the public certificate and the private key. | Is only required if JWE is enabled | + +#### SAML profile fields + +| Field | Description | Required | +| :---------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------ | +| IDPMetadataURL | This is a URL, e.g. `https://login.microsoftonline.com/your-tenant-id/federationmetadata/2007-06/federationmetadata.xml`, that links to [XML metadata](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf) containing information necessary for interaction with SAML-enabled identity or service providers. The document contains example URLs of endpoints, information about supported bindings, identifiers and public keys. Once you create your TIB profile you can find the SP metadata file under `{Dashboard HOST}/auth/{TIB Profile Name}/saml/metadata` | Yes | +| CertLocation | An X.509 certificate and the private key for signing your requests to the IDP. The value for `CertLocation` should be the path to a single file with the cert and key concatenated, e.g. `/etc/ssl/certs/example_cert.pem`. When used in an [embedded TIB instance in the dashboard](#installing-tyk-identity-broker-tib) then the `CertLocation` value can be the *certId* from the certificate manager. For further details please refer to [SSO with SAML](/api-management/single-sign-on-saml) | Yes | +| SAMLBaseURL | The host of TIB, e.g. `http://tyk-dashboard:3000/`, that will be used in the metadata document for the Service Provider. This will form part of the metadata URL used as the Entity ID by the IDP. 
The redirects configured in the IDP must match the expected Host and URI configured in the metadata document made available by Tyk Identity Broker. | Yes | +| ForceAuthentication | Ignore any session held by the IDP and force re-login every request. Defaults to false | No | +| SAMLBinding | Key for looking up the email claim in the SAML assertion form the IDP. Defaults to: https://schemas.xmlsoap.org/ws/2005/05/identity/claims.xsd | No | +| SAMLEmailClaim | Key for looking up the email claim in the SAML assertion form the IDP. Defaults to: https://schemas.xmlsoap.org/ws/2005/05/identity/claims.xsd | No | +| SAMLForenameClaim | Key for looking up the forename claim in the SAML assertion form the IDP. Defaults to: https://schemas.xmlsoap.org/ws/2005/05/identity/claims.xsd | No | +| SAMLSurnameClaim | Key for looking up the surname claim in the SAML assertion form the IDP. Defaults to: https://schemas.xmlsoap.org/ws/2005/05/identity/claims.xsd | No | +| FailureRedirect | URL to redirect the user if the login is not successful | Yes | +| EntityId | It is used to distinguish between different entities (IDP & SP) and ensure proper routing and validation of SAML assertions and requests. Defaults to the value set in the field `IDPMetadataURL` | No | + +## Installing Tyk Identity Broker (TIB) + +There are two ways to install TIB: + +1. **Embedded TIB**: Starting from Tyk Dashboard v3.0 TIB is built-in to the dashboard, in this case TIB will store the profiles in the same mongo database configured for dashboard + +2. **Standalone TIB**: Deployed as a seperate entity. 
In the standalone TIB, the profiles will be stored in a file indicated when the app is started + +**Pre-requisites** + +Below are the prerequisites of TIB: + +- Tyk Gateway v1.9.1+ +- Redis +- Tyk Dashboard v0.9.7.1+ (Only if you want to do SSO to Tyk Dashboard UI or Tyk Developer Portal) + +### Enable Embedded TIB + +For the embedded TIB you don't have to do anything; just ensure that in the Dashboard's config file `identity_broker` is not pointing to an external service, and `identity_broker.enabled` is set to `true`. For example: + +```json +"identity_broker": { + "enabled": true +}, +``` + +This setting behaves as follows: + +* If `enabled` = `false` then neither the external nor the internal TIB will be loaded +* If `enabled` = `true` and the TIB host is not present the internal TIB will be loaded +* If `enabled` = `true` and the TIB host is set, then the external TIB will be loaded + +### Install Standalone TIB + +Below are the three deployment options to install TIB as a standalone application: + +1. **Docker:** + + You can install via [Docker](https://hub.docker.com/r/tykio/tyk-identity-broker/). + +2. **Linux Packages:** + + You can install via [packages](https://packagecloud.io/tyk/tyk-identity-broker/install#bash-deb) (deb or rpm). + +3. **Helm Chart for Kubernetes:** + + [Tyk Helm Chart](/product-stack/tyk-charts/overview) does not support installing TIB as a separate application. If you want to enable embedded TIB in Dashboard, you can do so by updating `tib.enabled` to `true` in the `tyk-dashboard` chart. If you are using an umbrella chart from us (e.g. `tyk-stack` and `tyk-control-plane`), you can do so by updating `tyk-dashboard.tib.enabled` to `true`. + +### Important TIB Configurations + +#### Configure secret for hashing session cookies + +To secure session cookies within Tyk Identity Broker (TIB) when integrating with social providers, setting the `TYK_IB_SESSION_SECRET` environment variable is crucial.
This variable plays a pivotal role in hashing session cookies, thereby enhancing security. By default, if this variable isn't explicitly set, TIB falls back to using the Tyk Dashboard's admin_secret when it's embedded in the dashboard. + +For a seamless and secure setup, start by generating a strong, unique secret string. It is recommended to use a string with 32 or 64 bytes to ensure optimal security, this string will be your session secret. In a Linux, Unix, or MacOS environment, you can set this variable by running the command `export TYK_IB_SESSION_SECRET='your_secret'`. + +#### Setting Absolute Paths + +No command line arguments are needed, but if you are running TIB from another directory or during startup, you will need to set the absolute paths to the profile and config files: + +```bash +Usage of ./tyk-auth-proxy: + -c=string + Path to the config file (default "tib.conf") + -p#=string + Path to the profiles file (default "profiles.json") +``` + +See [how to configure TIB](https://github.com/TykTechnologies/tyk-identity-broker#how-to-configure-tib) + + +## Exploring Tyk Identity Broker REST API + +Refer to this [document](/tyk-identity-broker/tib-rest-api) + +## Single Sign On (SSO) + +SSO gives users the ability to log in to multiple applications without the need to enter their password more than once. +Authentication protocols such as OpenID Connect and SAML enable an application to verify the identity of users from an organization without the need to self store and manage them, and without doing the identification process and exposing their passwords to that application. Their lists of users and passwords are kept safe in one single place, in the IDP that the organization has chosen to use. The Authorization server of the IdP identify the users for a pre-registered and approved application (`client` in OAuth and OIDC terminology). 
+ +Using our Tyk-Identity-Broker (TIB), you can do both - use your existing users directory to login to the **Dashboard** or **Developer Portal** and have an SSO. TIB, among other options, supports four methods for login to Tyk's UI: + +
+ + + +To activate SSO on the Dashboard or Developer portal, there’s no requirement to install TIB separately; it is integrated into the Dashboard and Developer Portal. You have two configurations for SSO within the dashboard: +1. **Using Embedded TIB**: No need to install it separately. +2. **Using External TIB**: If you are using a previous version of the Dashboard or Portal, you can still use SSO with TIB installed as a separate application. + + + +1. [Login with 3rd party social providers](/api-management/single-sign-on-social-idp) +2. [Login with any IdP that supports OIDC](/api-management/single-sign-on-oidc) +3. [Login with any IdP that supports SAML](/api-management/single-sign-on-saml) +3. [Login with LDAP](/api-management/single-sign-on-ldap) + +### Tyk's REST API for SSO + +The SSO API allows you to implement custom authentication schemes for the Dashboard and Portal. You can access the API by both admin and dashboard APIs. +Our Tyk Identity Broker (TIB) internally also uses these APIs. + +#### Generate authentication token + +The Dashboard exposes two APIs: + +- `/admin/sso` - See [Dashboard Admin API SSO](/api-management/dashboard-configuration#single-sign-on-api-1) for more details. +- `/api/sso` - See [Dashboard API SSO](/api-management/dashboard-configuration#single-sign-on-api) for more details. + +which allow you to generate a temporary authentication token, valid for 60 seconds. They make same thing you can select one of them and use it. +However, the admin API requires `admin-auth` header which should be same with `admin-secret` parameter in `tyk_analytics.conf`, the regular API requires `authorization` header which should be same with the user authentication token. + +#### Using the Token + +Once you have issued a token you can login to the dashboard using the `/tap` url, or to the portal using the `/sso` URL, and provide an authentication token via the `nonce` query param. +If `nonce` is valid, Tyk will create a temporary user and log them in. 
+ +If you want to re-use existing dashboard users, instead of creating temporary ones, you can set the `"sso_enable_user_lookup": true` variable in the Dashboard config file (`tyk_analytics.conf`). This way you can set individual permissions for users logged in via SSO. + +##### Set up default permissions for the dashboard + +If you use the token with `dashboard` scope, and would like to avoid logging in as an admin user (which is the default permission set), you can add the `sso_permission_defaults` configuration option to the Dashboard config file (`tyk_analytics.conf`) to specify SSO user permissions in the following format: + +``` +"sso_permission_defaults": { + "analytics": "read", + "apis": "write", + "hooks": "write", + "idm": "write", + "keys": "write", + "policies": "write", + "portal": "write", + "system": "write", + "users": "write", + "user_groups": "write" +} +``` + +As an alternative, you can set `sso_default_group_id` to specify the User Group ID assigned to SSO users. + +In order to set individual user permissions, you should first create these users in the dashboard, set the needed permissions, and set `sso_enable_user_lookup` to `true` in the dashboard config. If an SSO user with the same email is found among the Dashboard users, their permissions will be re-used. 
+ +##### Sample Login Request + +```{.copyWrapper} +GET /tap?nonce=YTNiOGUzZjctYWZkYi00OTNhLTYwODItZTAzMDI3MjM0OTEw HTTP/1.1 +Host: localhost:3000 +``` diff --git a/api-management/gateway-config-introduction.mdx b/api-management/gateway-config-introduction.mdx new file mode 100644 index 000000000..4246610b8 --- /dev/null +++ b/api-management/gateway-config-introduction.mdx @@ -0,0 +1,90 @@ +--- +title: "Configuring Tyk Gateway" +description: "Explain the concept of Tyk API definition and the different types Tyk offers" +keywords: "API Definition, API Definition Object, API Definition Location" +sidebarTitle: "Overview" +--- + +## Introduction + +Tyk API Gateway is a [reverse-proxy](https://en.wikipedia.org/wiki/Reverse_proxy) that serves as an intermediary managing API traffic between clients and the upstream API service. It consists of a series of middleware blocks that process API requests received from clients. These middleware perform various checks and transformations of and to the request preparing it to be routed to the upstream. The upstream API service executes core business logic and returns responses to Tyk Gateway. The response is similarly passed through a series of middleware blocks before being returned to the client. + +Each of these middleware can be configured so that it will only allow the specific requests that you want to reach your upstream, and in the correct form. The request middleware chain encompasses functionality that includes: + +- listening for requests +- authentication and authorization of the client +- rate and quota limiting +- checking that the request is valid +- applying transformations to the payload and headers +- triggering event handlers that can notify external systems of certain events +- checking availability of the upstream service +- ... 
and finally routing to the correct target applying load balancing between multiple upstreams if required + +You can even create custom middleware (plugins) that will perform non-standard checks and transformations. As you can imagine Tyk has a lot of configuration options to implement all of this! + + +## Configuring the Gateway + +Tyk Gateway is configurable at three levels of granularity: + +- *Gateway level* settings that apply to all API proxies hosted on Tyk +- *API level* settings that apply to a specific API proxy +- *Endpoint level* settings that apply to specific endpoints (operations consisting of HTTP method and path) within an API proxy + +Some features can be configured at multiple levels. Where this is the case, specific precedence rules apply and are described in the relevant section of the documentation. + +### Gateway level settings + +Gateway level settings are stored in a file (typically `tyk.conf`) that is applied when the Gateway starts up, affecting all API proxies deployed on Tyk. They can also be configured using the equivalent environment variables. The Gateway level settings are documented [here](/tyk-oss-gateway/configuration). + +If you are using a config file you can store settings, typically secrets, in environment variables or an external key-value store and provide references to the stored keys within the configuration file. This is explained [here](/tyk-configuration-reference/kv-store). + +### API and endpoint level settings + +API and endpoint level settings are configured using an *API definition*. + +This is a structured JSON object that encapsulates all of the details that apply specifically to that API, including the listen path, upstream target details, valid endpoints and operations, rate limits, authentication, versioning, and both built-in and custom middleware. 
+ +You can store settings, typically secrets, in environment variables or an external key-value store and provide references to the stored keys within the API definition. This is explained [here](/tyk-configuration-reference/kv-store). + +API definition objects can be compact for a basic pass-through API, and can become very complex and large for APIs that require significant processing to be completed both before the request is proxied to the upstream service and once the response is received. + + +## API Definitions + +An *API definition* is the specification for an API proxy, providing Tyk with everything it needs to receive and process requests. Using Tyk's mock response, virtual endpoint and custom plugin functionality, you don't even need an upstream service - with a single API definition you can emulate a service entirely within Tyk, providing a [mock response](/api-management/traffic-transformation/mock-response). + +Tyk supports two types of API definition depending on the type of service that you are looking to proxy: + +- [Tyk OAS API definitions](/api-management/gateway-config-tyk-oas) are used for REST and streaming use cases +- [Tyk Classic API definitions](/api-management/gateway-config-tyk-classic) are used for GraphQL, XML/SOAP and TCP services + + + + + For versions of Tyk prior to 5.8 not all Gateway features can be configured using the Tyk OAS API definition, for edge cases you might need to use Tyk Classic for REST APIs, though we recommend updating to Tyk 5.8 and adopting Tyk OAS. + + + + +### Migrating to Tyk OAS + +In Tyk 4.1, we introduced the Tyk OAS API definition but initially it supported only a subset of the Gateway configuration options offered by Tyk Classic. Since then we have gradually added support until finally, with the launch of Tyk 5.8, we have reached effective parity with Tyk Classic and now recommend that Tyk OAS is used exclusively for REST use cases. 
+ +**Tyk 5.8 continues to support Tyk Classic for REST, but we will not be adding support for new features to this API definition style and strongly recommend migrating to Tyk OAS.** + +For Tyk Dashboard users with an existing portfolio of Tyk Classic API definitions, we provide a [migration tool](/api-management/migrate-from-tyk-classic), available via the Dashboard API and UI. + +### Storing API definitions + +For Tyk Open Source users, API definitions should be stored in `.json` files in the following location accessible by the Tyk Gateway: +- `/var/tyk-gateway/apps` (Linux) +- `/opt/tyk-gateway/apps` (Docker) + +For Tyk Dashboard users, API definitions will be kept in your [main storage](/api-management/dashboard-configuration#data-storage-solutions). + +### A note on terminology + +It's important not to confuse the *API proxy* with the API for the upstream service. Typically we refer to *API proxy* or *API* when referring to the endpoints exposed on Tyk Gateway and *upstream* or *upstream API* for the service that you develop and deploy to perform your business logic and data handling. 
+ + diff --git a/api-management/gateway-config-managing-classic.mdx b/api-management/gateway-config-managing-classic.mdx new file mode 100644 index 000000000..b17c9fef3 --- /dev/null +++ b/api-management/gateway-config-managing-classic.mdx @@ -0,0 +1,556 @@ +--- +title: "Managing Tyk Classic API Definition" +description: "How to manage Tyk Classic API definition" +keywords: "Tyk Classic API, Create, Update, Import, API Key, Security Policy" +sidebarTitle: "Tyk Classic APIs" +--- + +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; +import CreateApiInclude from '/snippets/create-api-include.mdx'; +import CreateApiKeyInclude from '/snippets/create-api-key-include.mdx'; +import CreateSecurityPolicyInclude from '/snippets/create-security-policy-include.mdx'; +import ImportApiInclude from '/snippets/import-api-include.mdx'; + +## Create an API + +### What does it mean to create an API in Tyk + +You have a running service with an API that you want your users to consume; you want to protect and manage access to that API using Tyk Gateway - how do you do that? +
+For Tyk Gateway to protect and [reverse proxy](https://en.wikipedia.org/wiki/Reverse_proxy) calls to your upstream service, you need to configure an API on Tyk Gateway. The minimum information that Tyk requires is the **listen path** (which is a path on the Tyk Gateway URL that you want your consumers to call) and your **API URL** (which is the URL of your service to which Tyk should forward requests). +
+This information and other configuration values are stored in an object called a *Tyk API Definition*. Once you have created your Tyk API Definition and deployed it in the Gateway, Tyk can start serving your consumers, forwarding their requests to your upstream service's API. + +To reach a detailed guide to creating Tyk API Definitions, please choose the tab for the product you are using: + +### Tyk Cloud + +Tyk Cloud is a fully managed service that makes it easy for API teams to create, secure, publish and maintain APIs at any scale, anywhere in the world. Tyk Cloud includes everything you need to manage your global API ecosystem: [Tyk Gateways](/tyk-oss-gateway), [Tyk Dashboard](/api-management/dashboard-configuration), [Tyk Developer Portal](/portal/overview/intro) and [Universal Data Graph](/api-management/data-graph#overview). +
+ +To embark on your API journey with Tyk Cloud, we recommend going to our [Quick Start guide](/tyk-cloud#quick-start-tyk-cloud). This guide will walk you through the process of creating your very first API in Tyk Cloud. +For an advanced step by step guide we recommend visiting our [Getting Started guide](/tyk-cloud#comprehensive-tyk-cloud-setup). This will explain advanced configuration steps relating to how to distribute your API across nodes, in addition to adding and testing your API. + +### Tyk Self-Managed + + + +If the command succeeds, you will see: +```json +{ + "action": "added", + "key": "xxxxxxxxx", + "status": "ok" +} +``` + +**What did we just do?** + +We just sent an API definition to the Tyk `/apis` endpoint. See [API definition objects](/api-management/gateway-config-tyk-classic) for details of all the available objects. These objects encapsulate all of the settings for an API within Tyk. + +Want to learn more from one of our team of engineers? + + + +### Tyk Open Source + + + +**Note: Integration with your OpenAPI documentation** + +In Tyk v4.1 we introduced support for APIs defined according to the [OpenAPI Specification v3.0.3](https://spec.openapis.org/oas/v3.0.3) (OAS). +This introduces a standard way to describe the vendor-agnostic elements of an API (the OpenAPI Definition, stored as an OpenAPI Document); we take this and add Tyk-specific configuration options to create the *Tyk OAS API Definition*. You can import your own OpenAPI document and Tyk will use this to generate the Tyk OAS API Definition. +For details on using Tyk OAS with Tyk Gateway, check out our guide to [working with Tyk OAS APIs](/api-management/gateway-config-managing-oas). + + + +**Prerequisites** + +Before you continue this tutorial, you will need a running [Tyk OSS gateway](/tyk-oss-gateway). 
Click the button for instructions on how to install Tyk Gateway: + + + +#### Creating an API on Tyk Gateway + +There are two ways to configure Tyk Gateway with an API definition: +1. [Create an API with the Tyk Gateway API](#using-tyk-gateway-api) - Tyk Gateway has its own APIs which provides various services including the registering of Tyk API Definitions on the Gateway. +2. [Create an API in File-based Mode](#create-an-api-in-file-based-mode) - alternatively you can create a Tyk API Definition in a file and then load it to the Gateway. + + +#### Using Tyk Gateway API + +Watch our video to learn how to add an API to Tyk's Open Source Gateway using [Postman](https://www.postman.com/downloads/). + + + +In order to use the Gateway API to create a Tyk API Definition you will need the API key for your deployment's Gateway API and then issue just one command to create the API and make it live. + +1. **Make sure you know your API secret** + + The API key to access your Tyk Gateway API is stored in your `tyk.conf` file; the property is called `secret`. You will need to provide this value in a header called `x-tyk-authorization` when making calls to the Gateway API. + +2. **Create an API** + + To create the API, let's send a Tyk API definition to the `/apis` endpoint on your Tyk Gateway. Remember to change the `x-tyk-authorization` value (API key) in the header of your API call and set the domain name and port to target your Tyk Gateway in the `curl` command. 
+ ```curl + curl -v -H "x-tyk-authorization: {your-secret}" \ + -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d '{ + "name": "Hello-World", + "slug": "hello-world", + "api_id": "Hello-World", + "org_id": "1", + "use_keyless": true, + "auth": { + "auth_header_name": "Authorization" + }, + "definition": { + "location": "header", + "key": "x-api-version" + }, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "use_extended_paths": true + } + } + }, + "proxy": { + "listen_path": "/hello-world/", + "target_url": "http://httpbin.org", + "strip_listen_path": true + }, + "active": true + }' http://{your-tyk-host}:{port}/tyk/apis | python -mjson.tool + ``` + + If the command succeeds, you will see: + ```json + { + "key": "Hello-World", + "status": "ok", + "action": "added" + } + ``` + + + +All APIs deployed on Tyk Gateway are given a unique `API ID`; if you don't provide one in the Tyk API Definition when creating the API, then an `API ID` will be generated automatically. + + + +**What did we just do?** + +We just registered a new API on your Tyk Gateway by sending a Tyk API definition to your Gateway's `/apis` endpoint. +Tyk API definitions encapsulate all of the settings for an API within Tyk Gateway and are discussed in detail in the [API section](/api-management/gateway-config-tyk-classic) of this documentation. + +**Restart or hot reload** + +Once you have created the file, you will need to either restart the Tyk Gateway, or issue a hot reload command, lets do the latter: +```curl +curl -H "x-tyk-authorization: {your-secret}" -s http://{your-tyk-host}:{port}/tyk/reload/group | python -mjson.tool +``` + +This command will hot-reload your API Gateway(s) and the new API will be loaded, if you take a look at the output of the Gateway (or the logs), you will see that it should have loaded Hello-World API on `/hello-world/`. 
+ +#### Create an API in File-based Mode + + + +APIs created without API ID in file based mode are invalid. + + + + +Creating a file-based API definition is very easy. + +Create a file called `api1.json` and place it in the `/apps` folder of your Tyk Gateway installation (usually in `/var/tyk-gateway`), then add the following: +```json +{ + "name": "Test API", + "slug": "test-api", + "api_id": "1", + "org_id": "1", + "auth_configs": { + "authToken": { + "auth_header_name": "Authorization" + } + }, + "definition": { + "location": "header", + "key": "x-api-version" + }, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "use_extended_paths": true + } + } + }, + "proxy": { + "listen_path": "/test-api/", + "target_url": "http://httpbin.org/", + "strip_listen_path": true + }, + "active": true +} +``` + +**Restart or hot reload** + +Once you have created the file, you will need to either restart the Tyk Gateway, or issue a hot reload command, let's do the latter: +```curl +curl -H "x-tyk-authorization: {your-secret}" -s https://{your-tyk-host}:{port}/tyk/reload/group | python -mjson.tool +``` + +This command will hot-reload your API Gateway(s) and the new API will be loaded, if you take a look at the output of the Gateway (or the logs), you will see that it should have loaded Test API on `/test-api/`. + +Your API is now ready to use via the Gateway. + +## Secure an API + +A security policy encapsulates several options that can be applied to a key. It acts as a template that can override individual sections of an API key (or identity) in Tyk. + +See [What is a Security Policy?](/api-management/policies#what-is-a-security-policy) for more details. + +### Tyk Cloud + + + +### Tyk Self Managed + + + +### Tyk Open Source + +#### Create a Policy with the Gateway + +Adding a policy to the Tyk Gateway is very easy. Policies are loaded into memory on load and so need to be specified in advance in a file called `policies.json`. 
To add a policy, simply create or edit the `/policies/policies.json` file and add the policy object to the object array: + +```json +{ + "POLICYID": { + "access_rights": { + "{API-ID}": { + "allowed_urls": [], + "api_id": "{API-ID}", + "api_name": "{API-NAME}", + "versions": [ + "Default" + ] + } + }, + "active": true, + "name": "POLICY NAME", + "rate": 1000, + "per": 1, + "quota_max": 10000, + "quota_renewal_rate": 3600, + "tags": ["Startup Users"] + } +} +``` + +The above creates a new policy with a policy ID that you can define, with the rate limits, and security profile that grants access to the APIs listed in the `access_rights` section. + +- `{API-ID}`: The API ID you wish this policy to grant access to, there can be more than one of these entries. +- `{API-NAME}`: The name of the API that is being granted access to (this is not required, but helps when debugging or auditing). +- `POLICY NAME`: The name of this security policy. + +The important elements: + +- `access_rights`: A list of objects representing which APIs that you have configured to grant access to. +- `rate` and `per`: The number of requests to allow per period. +- `quota_max`: The maximum number of allowed requests over a quota period. +- `quota_renewal_rate`: how often the quota resets, in seconds. In this case we have set it to renew every hour. + +## Access an API + +### Tyk Cloud + + + +You will see a 200 response with your new key: + +```yaml +{ + "api_model": {}, + "key_id": "59bf9159adbab8abcdefghijac9299a1271641b94fbaf9913e0e048c", + "data": {...} +} +``` + +The value returned in the `key_id` parameter of the response is the access key you can now use to access the API that was specified in the `access_rights` section of the call. 
+ +### Tyk Self Managed + + + +You will see a response with your new key: + +```json +{ + "action": "create", + "key": "c2cb92a78f944e9a46de793fe28e847e", + "status": "ok" +} +``` + +The value returned in the `key` parameter of the response is the access key you can now use to access the API that was specified in the `access_rights` section of the call. + +### Tyk Open Source + +To create an API Key, you will need the API ID that we wish to grant the key access to, then creating the key is an API call to the endpoint. + +**Prerequisite** + +- You will need your API secret, this is the `secret` property of the `tyk.conf` file. + +Once you have this value, you can use them to access the Gateway API, the below `curl` command will generate a key for one of your APIs, remember to replace `{API-SECRET}`, `{API-ID}` and `{API-NAME}` with the real values as well as the `curl` domain name and port to be the correct values for your environment. + +```curl +curl -X POST -H "x-tyk-authorization: {API-SECRET}" \ + -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d '{ + "allowance": 1000, + "rate": 1000, + "per": 1, + "expires": -1, + "quota_max": -1, + "org_id": "1", + "quota_renews": 1449051461, + "quota_remaining": -1, + "quota_renewal_rate": 60, + "access_rights": { + "{API-ID}": { + "api_id": "{API-ID}", + "api_name": "{API-NAME}", + "versions": ["Default"] + } + }, + "meta_data": {} + }' http://localhost:8080/tyk/keys/create | python -mjson.tool +``` + +The above creates a new key with the rate limits, and security profile that grants access to the APIs listed in the `access_rights` section. + +- `{API-ID}`: The API ID you wish this policy to grant access to, there can be more than one of these entries. +- `{API-NAME}`: The name of the API being granted access to (this is not required, but helps when debugging or auditing). + +The important elements: + +- `access_rights`: A list of objects representing which APIs you have configured to grant access to. 
+ +- `rate` and `per`: The number of allowed requests per period. +- `quota_max`: The maximum number of allowed requests over a quota period. +- `quota_renewal_rate`: how often the quota resets, in seconds. In this case, we have set it to renew every hour. + +You will see a response with your new key: + +```json +{ + "action": "create", + "key": "c2cb92a78f944e9a46de793fe28e847e", + "status": "ok" +} +``` + +The value returned in the `key` parameter of the response is the access key you can now use to access the API that was specified in the `access_rights` section of the call. + +## Import an API + +Tyk supports importing both API Blueprint and Swagger (OpenAPI) JSON definitions from either the Gateway or the Dashboard. Tyk will output the converted file to `stdout`. Below are the commands you can use to get Tyk to switch to command mode and generate the respective API definitions for both API Blueprint and Swagger files. + +### API Blueprint is being deprecated + +Our support for API Blueprint is being deprecated. We have been packaging [aglio](https://github.com/danielgtaylor/aglio) in our Docker images for the Dashboard which enables rendering API Blueprint Format in the portal. This module is no longer maintained and is not compatible with newer NodeJS. If you wish to continue using this feature, you can do so by installing the module yourself in your Dockerfile. The impact of this change is that our Docker images will no longer contain this functionality. + +As a workaround, you can do the following: + +* Create API Blueprint in JSON format using the Apiary [Drafter](https://github.com/apiaryio/drafter) tool +* Convert API Blueprint to OpenAPI (Swagger) using the Apiary [API Elements CLI](https://github.com/apiaryio/api-elements.js/tree/master/packages/cli) tool. + +### Using API Blueprint + + + +See [note](#api-blueprint-is-being-deprecated) above regarding deprecation of support for API Blueprint. 
+ + + +Tyk supports an easy way to import Apiary API Blueprints in JSON format using the command line. + +Blueprints can be imported and turned into standalone API definitions (for new APIs) and also imported as versions into existing APIs. + +It is possible to import APIs and generate mocks or to generate Allow Lists that pass-through to an upstream URL. + +All imported Blueprints must be in the JSON representation of Blueprint's markdown documents. This can be created using Apiary's [Snow Crash tool](https://github.com/apiaryio/snowcrash). + +Tyk outputs all new API definitions to `stdout`, so redirecting the output to a file is advised in order to generate new definitions to use in a real configuration. + +#### Importing a Blueprint as a new API: + +Create a new definition from the Blueprint: + +```{.copyWrapper} +./tyk --import-blueprint=blueprint.json --create-api --org-id= --upstream-target="http://widgets.com/api/" +``` + +#### Importing a definition as a version in an existing API: + +Add a version to a definition: + +```{.copyWrapper} +./tyk --import-blueprint=blueprint.json --for-api= --as-version="version_number" +``` + +#### Creating your API versions as a mock + +As the API Blueprint definition allows for example responses to be embedded, these examples can be imported as forced replies, in effect mocking out the API. To enable this mode, when generating a new API or importing as a version, simply add the `--as-mock` parameter. + +### Using Swagger (OpenAPI) + +Tyk supports importing Swagger documents to create API definitions and API versions. Swagger imports do not support mocking though, so sample data and replies will need to be added manually later. 
+ +#### Importing a Swagger document as a new API + +Create a new definition from Swagger: + +```{.copyWrapper} +./tyk --import-swagger=petstore.json --create-api --org-id= --upstream-target="http://widgets.com/api/" +``` + + +When creating a new definition from an OAS 3.0 spec, you will have to manually add the listen path after the API is created. + + + + +#### Importing a Swagger document as a version into an existing API + +Add a version to a definition: + +```{.copyWrapper} +./tyk --import-swagger=petstore.json --for-api= --as-version="version_number" +``` + +#### Mocks + +Tyk supports API mocking using our versioning `use_extended_paths` setup, adding mocked URL data to one of the three list types (white_list, black_list or ignored). In order to handle a mocked path, use an entry that has `action` set to `reply`: + +```json +"ignored": [ + { + "path": "/v1/ignored/with_id/{id}", + "method_actions": { + "GET": { + "action": "reply", + "code": 200, + "data": "Hello World", + "headers": { + "x-tyk-override": "tyk-override" + } + } + } + } +], +``` + +See [Versioning](/api-management/gateway-config-tyk-classic#tyk-classic-api-versioning) for more details. + +### Import APIs via the Dashboard API + + + +### Import APIs via the Dashboard UI + +1. **Select "APIs" from the "System Management" section** + + API listing + +2. **Click "IMPORT API"** + + Add API button location + + Tyk supports the following import options: + + 1. From an Existing Tyk API definition + 2. From a Apiary Blueprint (JSON) file + 3. From a Swagger/OpenAPI (JSON only) file + 4. From a SOAP WSDL definition file (new from v1.9) + + To import a Tyk Definition, just copy and paste the definition into the code editor. + + For Apiary Blueprint and Swagger/OpenAPI, the process is the same. For example: + + Click the "From Swagger (JSON)" option from the pop-up + + Import popup + + For WSDL: + + Import WSDL + +3. 
**Enter API Information** + + You need to enter the following information: + + * Your **Upstream Target** + * A **Version Name** (optional) + * An optional **Service Name** and **Port** (WSDL only) + * Copy code into the editor + +4. **Click "Generate API"** + + Your API will appear in your APIs list. If you select **EDIT** from the **ACTIONS** drop-down list, you can see the endpoints (from the [Endpoint Designer](/api-management/dashboard-configuration#exploring-api-endpoint-designer)) that have been created as part of the import process. + +### Creating a new API Version by importing an API Definition using Tyk Dashboard + +As well as importing new APIs, with Tyk, you can also use import to create a new version of an existing Tyk Classic API. + +1. Open the API Designer page and select Import Version from the **Options** drop-down. + + Import API Version Drop-Down + +2. Select either OpenAPI (v2.0 or 3.0) or WSDL/XML as your source API + +3. You need to add a new **API Version Name**. **Upstream URL** is optional. + + Import API Version Configuration + +4. Click **Import API**. + + Import API + +5. Select the **Versions** tab and your new version will be available. +6. Open the **Endpoint Designer** for your API and select your new version from **Edit Version**. +7. You will see all the endpoints are saved for your new version. + +Version Endpoints + +##### Import from an OpenAPI v2.0 Document + +1. From the Import API screen, select OpenAPI. + + Import OAS 2.0 API + +2. Paste your OAS v2.0 compliant definition into the code editor. + + OAS 2.0 definition in Editor + +3. Note that the Dashboard has detected that an OAS v2.0 definition has been imported and you need to specify an upstream URL field to proceed. + + Upstream URL + +4. Click **Import API**. + + Import API + + Your API will be added to your list of APIs. 
diff --git a/api-management/gateway-config-managing-oas.mdx b/api-management/gateway-config-managing-oas.mdx new file mode 100644 index 000000000..401fadee3 --- /dev/null +++ b/api-management/gateway-config-managing-oas.mdx @@ -0,0 +1,872 @@ +--- +title: "Working with Tyk OAS APIs" +description: "How to work with Tyk OAS APIs" +keywords: "Tyk OAS API, Create, Update, Import, Export, Versioning, API Key, Security Policy" +sidebarTitle: "Tyk OAS APIs" +--- + +## Overview + +Tyk's support for the OpenAPI Specification is designed to fit in with your existing workflows as seamlessly as possible, whether you have one of our paid offerings, or are using our free open-source Gateway. You should be able to do a huge amount in the editor of your choice. The Tyk Dashboard's API Designer will support you whether you want to create a new API from a blank slate, or just to dip into if you want a bit of help with configuring Tyk's powerful transformation middleware. + +One of the great things about working with Tyk is that the OpenAPI document containing the OAS compliant description of your service is a single file (or group of files) that you deploy throughout your workflow. You can iterate on that document within your source control system until you are totally happy. At this point, you can publish the OpenAPI description to your Developer Portal to document what a Developer needs to use the API (and nothing they don’t need to know). As the OpenAPI description is the source of truth for the Tyk OAS API definition and can be updated without impacting the Tyk Vendor Extension, you can automate deployment of updates to your API on Tyk whenever a new version is committed into your source control. This model is very popular in GitOps and CI/CD environments. + +Tyk OAS API workflow + + + +Warning + +In Tyk Gateway release 5.3.0, Tyk OAS APIs gained feature maturity. 
Tyk Dashboard will automatically migrate any pre-5.3.0 Tyk OAS APIs to the feature mature standard when you upgrade to 5.3.0 or later. Tyk OAS APIs prior to v5.3.0 must be manually migrated if you are using Tyk OSS Gateway. Feature mature Tyk OAS APIs may not work with pre-5.3.0 versions of Tyk Gateway. + +It is not possible to rollback to previous versions of Tyk components with Tyk OAS APIs created in 5.3.0. + +For further details, please refer to the [release notes](/developer-support/release-notes/gateway) for Tyk Gateway v5.3.0. + + + +### API Definition Management with Tyk + +There are three methods by which API definitions can be deployed to Tyk: using the [Tyk Dashboard API Designer](/api-management/dashboard-configuration), using the [Tyk Dashboard API](/api-management/dashboard-configuration#exploring-the-dashboard-api) and using the [Tyk Gateway API](/tyk-gateway-api). + +The first two options provide access to the powerful licensed features of Tyk, whilst the third is used for open source deployments. Tyk provides additional tools to assist with automation when using the Tyk Dashboard API - namely [Tyk Operator](/api-management/automations/operator)(for Kubernetes deployments) and [Tyk Sync](/api-management/automations/sync) (for gitops). 
+ +| Feature | API Designer | Tyk Dashboard API | Tyk Gateway API | +| :------------------- | :-------------- | :------------------- | :----------------- | +| Work with YAML format | βœ… | βœ… | ❌ | +| Work with JSON format | βœ… | βœ… | βœ… | +| Import an OpenAPI description | βœ… | βœ… | βœ… | +| Import a complete Tyk OAS API definition | βœ… | βœ… | βœ… | +| Import [multi-part OpenAPI descriptions](/api-management/gateway-config-managing-oas#multi-part-openapi-documents) | βœ… | βœ… | ❌ | +| Apply API [Templates](/api-management/dashboard-configuration#governance-using-api-templates) | βœ… | βœ… | ❌ | +| Export the OpenAPI description | βœ… | βœ… | βœ… | +| Export the Tyk OAS API definition | βœ… | βœ… | βœ… | +| Update API with new OpenAPI description | βœ… | βœ… | βœ… | +| Manage API versions | βœ… | βœ… | βœ… | +| Assign APIs to [Categories](/api-management/dashboard-configuration#governance-using-api-categories) | βœ… | βœ… | ❌ | +| Assign API [Owners](/api-management/user-management#api-ownership) | βœ… | βœ… | ❌ | + +## Creating an API + +Tyk is designed to fit into your workflow, so has full support for you to import your existing OpenAPI descriptions as the starting point for a Tyk OAS API. Tyk can automatically configure aspects of the Gateway's API security and management functions based upon the content of the OpenAPI description, for example using the security settings to configure client authentication or the endpoint examples and schemas to configure request validation and mock response middleware. + +Alternatively, if you don't have an existing OpenAPI description, you can use the API Designer to bootstrap one for you: build your API in Tyk and then export an OAS compliant description that you can use elsewhere, for example as documentation for your new API. + +### Using Tyk Dashboard API Designer to create an API + +In this tutorial we guide you through the steps to create a new Tyk OAS API using the GUI. 
+ +{/* - hiding this video as it is out of date */} + +1. Start by selecting **APIs** from the **API Management** section + + Add new API + +2. Now select **Add New API** and then, choose **Design from scratch** + + Start designing a new API + +3. Now complete the basic configuration for your new API following the guided steps providing: + - API name + - API type (**HTTP**) + - API style (**OpenAPI**) + - [API template](/api-management/dashboard-configuration#working-with-api-templates-using-the-template-designer) (optional) + - Upstream URL + + Basic configuration of the new API + +4. Deploy the API to your Gateway + + - If you are using Tyk Cloud or a [sharded](/api-management/multiple-environments) deployment you will be prompted to select on which Gateways the API should be deployed + + Choose where to deploy the API + + - You need to set the **API status** (if you set this to **Active**, Tyk will accept requests to the API) + - You need to set the **Access** (set this to **External** to expose your API outside Tyk so that your clients can consume it) + - When creating a new API you will probably want to set API status to **Inactive** while you configure the rest of the API definition + + Set API Status + + Click **Save API** to create the API definition and, depending on the options you chose for API status and access, deploy the API to your gateway to start serving traffic. + + + + + You can see the URL given to your API, in the Info section displayed at the top of the page (**API URL**). + + + +5. Secure your API by configuring [client authentication](/api-management/client-authentication) + + From the API page: + + 1. Click **Edit** + 2. Scroll down to the **Server** section and enable **Authentication** + 3. Select **Auth Token** from the drop-down list + 4. For **Authentication token location** select **Use header value** + 5. Note that the default Auth key header name is *Authorization* + 6. Save your API + +6. Declare endpoints for your API + + 1. 
After selecting **Edit**, move to the **Endpoints** tab. + + Add new endpoint + + + 2. Click **Add Endpoint** then complete the requested details for the new endpoint: + + - Select a method from the drop-down list + - Add a path for your endpoint + - Add an optional summary and description + - select **Add Endpoint** + + Provide the details of the new endpoint + + 3. Your new endpoint will now be listed in the Endpoints tab + + List of all endpoints declared for the API + + 4. You can now add [middleware](/api-management/traffic-transformation) to your endpoint via the **Add Middleware** button. + + 5. Click **Save API** to apply the changes to your API. + +7. Test your API + + From the **Info** section, copy the [API base path](/api-management/gateway-config-managing-oas#api-base-path) and send a request to the API without providing an authorization token: + + ``` + curl --location --request GET 'http://localhost:8181/petstore/' \ + --header 'Authorization: wrongkey' + ``` + + Note that the Gateway will respond with the following error message, confirming that authentication is required: + + ```.json + { + "error": "Access to this API has been disallowed" + } + ``` + +### Using your own code editor to create Tyk OAS API definitions + +The API definition is often generated either from the codebase or using API design tools (such as [Swagger Editor](https://editor.swagger.io/), [Postman](https://www.postman.com/) and [Stoplight](https://stoplight.io/)). + +To enjoy writing a *Tyk OAS API definition* as if it is [a native programming language](https://tyk.io/blog/get-productive-with-the-tyk-intellisense-extension/), you can add the [Tyk OAS API definition schema](https://raw.githubusercontent.com/TykTechnologies/tyk-schemas/main/JSON/draft-04/schema_TykOasApiDef_3.0.x.json) to your favorite IDE or editor. 
We have published a Tyk VS Code extension that provides Tyk API schema validation and auto-completion (both OAS and other schemas) in the [VS Code marketplace](https://marketplace.visualstudio.com/items?itemName=TykTechnologiesLimited.tyk-schemas). You can use it to create Tyk objects in your IDE (Tyk API definitions, Key and Tyk config file). + +#### Loading the API definition into Tyk + + + + + +Armed with a Tyk OAS API definition, in YAML or JSON format, you can use this to create an API in Tyk Dashboard with only a few clicks. + +1. Start by selecting **APIs** from the **API Management** section + + Add new API + +2. Now select **Add New API** and then, choose **Import**. + + Loading the API definition into Tyk Dashboard + + Note that you can optionally apply an [API template](/api-management/dashboard-configuration#governance-using-api-templates) by choosing **Start from template** as explained [here](/api-management/dashboard-configuration#using-a-template-when-creating-a-new-api), however in this explanation we will not be applying a template. + +3. From the Import API screen, select **Tyk API** because the object you want to import to Tyk is a complete API definition. + + Choosing what to import + + + + + On the Import API screen, there are three options for Import Type, it is important to select the correct one for the object that you want to load into Tyk: + + - openAPI is used only for [OpenAPI descriptions](/api-management/gateway-config-tyk-oas#openapi-description) (without the [Tyk Vendor Extension](/api-management/gateway-config-tyk-oas#tyk-vendor-extension)) + - TykAPI is used for a full [Tyk OAS API definition](/api-management/gateway-config-tyk-oas#what-is-a-tyk-oas-api-definition) (comprising OpenAPI description plus Tyk Vendor Extension) or Tyk Classic API definition + - WSDL/XML is used for WSDL/XML content and will result in a Tyk Classic API + + + +4. Now you can paste the entire Tyk OAS API definition into the text editor. 
+ + Loading the API definition into Tyk Dashboard + +5. Select **Import API** to complete the import and create the API based on your API definition. + + + + + +When making calls to the Tyk Dashboard API you'll need to set the domain name and port for your environment and provide credentials in the `Authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :------------------- | :------ | :---------------------- | :----------------------------- | +| Tyk Dashboard API | 3000 | `Authorization` | From Dashboard User Profile | + +You can obtain your authorization credential (Dashboard API key) from the Tyk Dashboard UI: + +- Select **Edit profile** from the dropdown that appears when you click on your username in the top right corner of the screen +- Scroll to the bottom of the page were you will see your **Tyk Dashboard API Access Credentials** + +You will also need to have β€˜admin’ or β€˜api:write’ permission if [RBAC](/api-management/user-management) is enabled. + +To create the API in Tyk, you simply send your Tyk OAS API Definition in the payload to the `POST /api/apis/oas` endpoint of your Tyk Dashboard API. + +| Property | Description | +| :-------------- | :-------------------------- | +| Resource URL | `/api/apis/oas` | +| Method | `POST` | +| Type | None | +| Body | Tyk OAS API Definition | +| Parameters | Query: `templateID` | + +Using [this](https://bit.ly/39jUnuq) API definition it is possible to create a Tyk OAS API on your Tyk Gateway that forwards requests to the [Swagger Petstore](https://petstore3.swagger.io) request/response service. + +``` +curl -H "Authorization: ${DASH_KEY}" -H "Content-Type: application/json" ${DASH_URL}/apis/oas -d "$(wget -qO- https://bit.ly/39jUnuq)" +``` + +**Check request response** + +If the command succeeds, you will see the following response, where `Meta` contains the unique identifier (`id`) for the API you have just created. 
If you did not provide a value in the `id` field, then Tyk will automatically assign one. + +``` +{ + "Status": "OK", + "Message": "API created", + "Meta": {NEW-API-ID} +} +``` + +What you have done is to send a Tyk OAS API definition to Tyk Dashboard's `/api/apis/oas` endpoint resulting in the creation of the API in your Tyk Dashboard which will automatically deploy it to your Gateway. + +You can use the optional `templateId` parameter to apply an [API Template](/api-management/dashboard-configuration#applying-a-template-when-creating-an-api-from-a-tyk-oas-api-definition) to your API definition when creating the API. + + + + + +When making calls to the Tyk Gateway API you'll need to set the domain name and port for your environment and provide credentials in the `x-tyk-authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :----------------- | :------ | :----------------------- | :---------------------------------- | +| Tyk Gateway API | 8080 | `x-tyk-authorization` | `secret` value set in `tyk.conf` | + +To create the API in Tyk, you simply send your Tyk OAS API Definition in the payload to the `POST /tyk/apis/oas` endpoint of your Tyk Gateway API. 
+ +Using [this](https://bit.ly/39tnXgO) minimal API definition it is possible to create a Tyk OAS API on your Tyk Gateway using only 30 lines: + +```curl +curl --location --request POST 'http://{your-tyk-host}:{port}/tyk/apis/oas' \ +--header 'x-tyk-authorization: {your-secret}' \ +--header 'Content-Type: text/plain' \ +--data-raw +'{ + "info": { + "title": "Petstore", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } +}' +``` + +**Check request response** + +If the command succeeds, you will see the following response, where `key` contains the unique identifier (`id`) for the API you have just created. If you did not provide a value in the `id` field, then Tyk will automatically assign one. + +```.json +{ + "key": {NEW-API-ID}, + "status": "ok", + "action": "added" +} +``` + +What you have done is to send a Tyk OAS API definition to Tyk Gateway's `/tyk/apis/oas` endpoint resulting in the creation of the API in your Tyk Gateway. + +**Restart or hot reload** + +Once you have created your API you need to load it into the Gateway so that it can serve traffic. To do this you can either restart the Tyk Gateway or issue a [hot reload](/tyk-stack/tyk-gateway/important-prerequisites#hot-reload-is-critical-in-tyk-ce) command: + +```.curl +curl -H "x-tyk-authorization: {your-secret}" -s http://{your-tyk-host}:{port}/tyk/reload/group +``` + +You can go to the `/apps` folder of your Tyk Gateway installation (by default in `/var/tyk-gateway`) to see where Tyk has stored your Tyk OAS API Definition. 
+ + + + + +### Importing an OpenAPI description to create an API + +Tyk will automatically update the `servers` section in the imported OpenAPI description, adding the base path URL to which requests should be sent to access the new API. It will take the existing entry and use this to generate the upstream (target) URL if none is provided. + + + + + +If you have a valid OAS 3.0 compliant OpenAPI description, in YAML or JSON format, you can use this to create an API in Tyk Dashboard with only a few clicks. + +1. Start by selecting **APIs** from the **API Management** section + + Add new API + +2. Now select **Add New API** and then, choose **Import**. + + Loading the API definition into Tyk Dashboard + +3. From the Import API screen, select **openAPI** because the object you want to import to Tyk is an OpenAPI description. + + Choosing what to import + + + + + On the Import API screen, there are three options for Import Type, it is important to select the correct one for the object that you want to load into Tyk: + + - openAPI is used only for [OpenAPI descriptions](/api-management/gateway-config-tyk-oas#openapi-description) (without the [Tyk Vendor Extension](/api-management/gateway-config-tyk-oas#tyk-vendor-extension)) + - TykAPI is used for a full [Tyk OAS API definition](/api-management/gateway-config-tyk-oas#what-is-a-tyk-oas-api-definition) (comprising OpenAPI description plus Tyk Vendor Extension) or Tyk Classic API definition + - WSDL/XML is used for WSDL/XML content and will result in a Tyk Classic API + + + +4. Now you can choose the location of the OpenAPI description, which can be: + + - pasted into the text editor + - uploaded using a file picker + - retrieved from a file server + + Loading the API definition into Tyk Dashboard + +5. You can optionally apply an [API template](/api-management/dashboard-configuration#governance-using-api-templates) from the drop-down. + + Applying a template + +6. 
You can configure the *listen path* and *upstream (target) URL* in the **Manual configuration options** section. Note that if you do not provide a listen path, Tyk will default to `/` and if you do not provide an upstream URL, Tyk will use the first value provided in the [servers.url](/api-management/gateway-config-managing-oas#api-base-path) section in the OpenAPI description. + + Configuring the listen path and upstream URL + +7. Tyk can automatically configure the request processing middleware chain based upon configuration defined by the OpenAPI Specification. If your OpenAPI desription contains the relevant data then select the characteristics you would like to configure. + + Configuring the listen path and upstream URL + + | Middleware | OpenAPI data used for configuration | + |------------|-------------------------------------| + | [Request validation](/api-management/traffic-transformation/request-validation#request-schema-in-openapi-specification) | Endpoints that have `requestBody` or `schema` | + | [Mock response](/api-management/traffic-transformation/mock-response#mock-responses-using-openapi-metadata) | Endpoints with `examples` or `schema` | + | [Client authentication](/api-management/client-authentication#how-does-tyk-implement-authentication-and-authorization) | Defined in `security` and `securitySchemes` | + | [Allow list](/api-management/traffic-transformation/allow-list) | Restrict access only to declared endpoint paths | + +8. Select **Import API** to complete the import and create the API based on your API definition. 
+ + + + + +When making calls to the Tyk Dashboard API you'll need to set the domain name and port for your environment and provide credentials in the `Authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :------------------- | :------ | :---------------------- | :----------------------------- | +| Tyk Dashboard API | 3000 | `Authorization` | From Dashboard User Profile | + +You can obtain your authorization credential (Dashboard API key) from the Tyk Dashboard UI: + +- Select **Edit profile** from the dropdown that appears when you click on your username in the top right corner of the screen +- Scroll to the bottom of the page were you will see your **Tyk Dashboard API Access Credentials** + +You will also need to have β€˜admin’ or β€˜api:write’ permission if [RBAC](/api-management/user-management) is enabled. + +To create the API in Tyk, you simply send your OpenAPI document in the payload to the `POST /api/apis/oas/import` endpoint of your Tyk Dashboard API. 
+ +| Property | Description | +| :-------------- | :------------------------------------------ | +| Resource URL | `/api/apis/oas/import` | +| Method | `POST` | +| Type | None | +| Body | OpenAPI Document | +| Parameters | Query: `listenPath` `upstreamURL` `authentication` `allowList` `validateRequest` `mockResponse` `apiID` `templateId` | + +The optional parameters are: + +| Parameter | Effect | Default if omitted | +| :------------------- | :--------------------------------- | :-------------------- | +| `listenPath` | Set the listen path for the API | Defaults to `/` | +| `upstreamURL` | Set the upstream (target) URL | Defaults to the first URL in the `servers` section of the [OpenAPI description](/api-management/gateway-config-managing-oas#api-base-path) | +| `authentication` | Configure [client authentication](/api-management/client-authentication#how-does-tyk-implement-authentication-and-authorization) based on `security` and `securitySchemes` | Client authentication is not configured | +| `allowList` | Enable [allow list](/api-management/traffic-transformation/allow-list) middleware for all endpoints declared in the OpenAPI description | Allow list not configured | +| `validateRequest` | Configure [request validation](/api-management/traffic-transformation/request-validation#request-schema-in-openapi-specification) for all endpoints with `requestBody` or `schema` defined | Request validation not configured | +| `mockResponse` | Configure [mock response](/api-management/traffic-transformation/mock-response#mock-responses-using-openapi-metadata) for all endpoints with `examples` or `schema` defined | Mock response not configured | +| `apiID` | Id to be assigned to the new API | Tyk will determine and assign a unique Id | +| `templateId` | Apply the selected [API template](/api-management/dashboard-configuration#applying-a-template-when-creating-an-api-from-a-tyk-oas-api-definition) when creating the API | No template is applied | + +**Check request response** + 
+If the command succeeds, you will see the following response, where `Meta` contains the unique identifier (`id`) for the API you have just created. + +``` +{ + "Status": "OK", + "Message": "API created", + "Meta": {NEW-API-ID} +} +``` + + + + + +When making calls to the Tyk Gateway API you'll need to set the domain name and port for your environment and provide credentials in the `x-tyk-authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :----------------- | :------ | :----------------------- | :---------------------------------- | +| Tyk Gateway API | 8080 | `x-tyk-authorization` | `secret` value set in `tyk.conf` | + +To create the API in Tyk, you simply send your OpenAPI document in the payload to the `POST /tyk/apis/oas/import` endpoint of your Tyk Gateway API. + +| Property | Description | +| :-------------- | :------------------------------------------ | +| Resource URL | `/tyk/apis/oas/import` | +| Method | `POST` | +| Type | None | +| Body | OpenAPI Document | +| Parameters | Query: `listenPath` `upstreamURL` `authentication` `allowList` `validateRequest` `mockResponse` `apiID` | + +The optional parameters are: + +| Parameter | Effect | Default if omitted | +| :------------------- | :--------------------------------- | :-------------------- | +| `listenPath` | Set the listen path for the API | Defaults to `/` | +| `upstreamURL` | Set the upstream (target) URL | Defaults to the first URL in the `servers` section of the [OpenAPI description](/api-management/gateway-config-managing-oas#api-base-path) | +| `authentication` | Configure [client authentication](/api-management/client-authentication#how-does-tyk-implement-authentication-and-authorization) based on `security` and `securitySchemes` | Client authentication is not configured | +| `allowList` | Enable [allow list](/api-management/traffic-transformation/allow-list) middleware for all endpoints declared in the 
OpenAPI description | Allow list not configured | +| `validateRequest` | Configure [request validation](/api-management/traffic-transformation/request-validation#request-schema-in-openapi-specification) for all endpoints with `requestBody` or `schema` defined | Request validation not configured | +| `mockResponse` | Configure [mock response](/api-management/traffic-transformation/mock-response#mock-responses-using-openapi-metadata) for all endpoints with `examples` or `schema` defined | Mock response not configured | +| `apiID` | Id to be assigned to the new API | Tyk will determine and assign a unique Id | + +**Check request response** + +If the command succeeds, you will see the following response, where `key` contains the unique identifier (`id`) for the API you have just created. + +```.json +{ + "key": {NEW-API-ID}, + "status": "ok", + "action": "added" +} +``` + +**Restart or hot reload** + +Once you have created your API you need to load it into the Gateway so that it can serve traffic. To do this you can either restart the Tyk Gateway or issue a [hot reload](/tyk-stack/tyk-gateway/important-prerequisites#hot-reload-is-critical-in-tyk-ce) command: + +```.curl +curl -H "x-tyk-authorization: {your-secret}" -s http://{your-tyk-host}:{port}/tyk/reload/group +``` + +You can go to the `/apps` folder of your Tyk Gateway installation (by default in `/var/tyk-gateway`) to see where Tyk has stored your Tyk OAS API Definition. + + + + + +#### API base path + +The [API base path](https://swagger.io/docs/specification/v3_0/api-host-and-base-path/) is the URL that a client should use when consuming (sending requests to) the API deployed on Tyk. This will comprise the address of the Tyk Gateway plus the API's listen path. + +**Detecting an Existing API Base Path** + +When creating an API, Tyk analyzes the `servers.url` section of the OpenAPI description to determine if it already contains a valid API base path. 
+ +- If the first entry in `servers.url` is an address on the Tyk Gateway, then this is considered a valid API base path. +- If there is not a valid API base path, then Tyk will assume that the first value in `servers.url` is the address of the upstream service - and so will use this as the *upstream (target) URL* for the API proxy. If there are multiple entries in `servers.url` Tyk will only consider the first entry and ignore all others. + +Tyk supports [OpenAPI server variables](https://learn.openapis.org/specification/servers.html#server-variables), so if the first `servers` entry contains a parameterised URL, Tyk will fill in the parameters with the values provided in the `variables` associated with that entry. + +**Setting the API Base Path** + +If the `servers.url` section did not contain a valid *API base path* then Tyk will insert a new entry in the first location in `servers.url` with a valid API base path comprising the Tyk Gateway address plus the *listen path* for the API. + +For example, given the following fragment of the OpenAPI description and importing to a Tyk Gateway at `https://my-gateway.com` specifying a listen path of `my-api`: + +```yaml + servers: + - url: https://upstream-A.com + - url: http://upstream-B.com +``` + +Tyk will configure the Tyk OAS API with the following: + +```yaml + servers: + - url: https://my-gateway.com/my-api/ + - url: https://upstream-A.com + - url: http://upstream-B.com + + x-tyk-api-gateway: + server: + listenPath: + value: /my-api/ + upstream: + url: https://upstream-A.com +``` + + + +This can introduce a change to the "source of truth" (OpenAPI description) for the API (the addition of the API base path). We recommend that you export the modified OpenAPI description and apply this to your documentation, as it provides the address to which clients should direct their traffic. + + + +**Upstream URL Override** + +The servers section is not analyzed if an upstream (target) URL is specified during the import action. 
If an upstream URL was specified, that will be used as the upstream for the API. The API base path will still be constructed and added to the `servers` section of the OpenAPI description. + +**Tyk does not support relative URLs** + +If the first entry is a relative URL, or another format that Tyk cannot process, the import will fail with an error. + +For example attempting to import an OpenAPI description containing this configuration: + +```yaml + servers: + - url: /relative-url + - url: http://upstream-B.com +``` +will error with the following message: + +```json +{ + "status": "error", + "message": "error validating servers entry in OAS: Please update \"/relative-url\" to be a valid url or pass a valid url with upstreamURL query param" +} +``` + +#### Multi-part OpenAPI documents + +OAS 3.0 allows an OpenAPI description to be [split across multiple files](https://swagger.io/docs/specification/v3_0/using-ref/) by use of the `$ref` keyword. + +This allows you to share snippets of the API definition across multiple APIs, or to have specific ownership of elements of the API configuration owned by different teams. + +From Tyk 5.8.0 there is full support for these multi-part OpenAPI documents with Tyk Dashboard. + +We consider two different types of file containing these OpenAPI descriptions: + +- the **main fragment**, which contains the `info` section +- the **secondary fragments**, which contain snippets of the OpenAPI description that are referred to using external references (`$ref`) + +Note that secondary fragments can also contain external references to other secondary fragments (but not to the main fragment). + +When creating or updating an API, you simply provide Tyk with the **main fragment** and ensure that all of the references can be resolved. 
+ +Resolution can be: +- local, by providing a ZIP archive containing all fragments +- remote, by providing resolvable paths to the secondary fragments (this is particularly used if the main fragment is provided via URL, as all fragments can then exist on the same file server). + + + + + The **main fragment** must be in a file named `openapi.json` or `openapi.yaml` (depending on the format used). + + + +##### Creating the ZIP Archive + +When creating ZIP archives for the multi-part OpenAPI import feature, it's important to exclude operating system metadata files that could interfere with the import process. + +- **MacOS Users** + + When using the `zip` command on MacOS, include the `-X` flag to exclude extended attributes and hidden files: `zip -X -r archive.zip directory/` + +- **Linux Users** + + When using the `zip` command on Linux, you can exclude hidden files using: `zip -r archive.zip directory/ -x "*/\.*"` + + To exclude specific metadata files: `zip -r archive.zip directory/ -x "*/\.*" "*/Thumbs.db" "*/.DS_Store"` + +- **Windows Users** + + When using PowerShell to create ZIP archives on Windows, you can exclude hidden and system files with: `Compress-Archive -Path "directory\*" -DestinationPath "archive.zip" -CompressionLevel Optimal` + + To exclude specific metadata files (like Thumbs.db or .DS_Store) you can use: `Get-ChildItem "directory" -Recurse -File | Where-Object { $_.Name -notmatch '(^\.DS_Store$|^Thumbs\.db$)' } | Compress-Archive -DestinationPath "archive.zip"` + +- **Using GUI Tools** + + If using GUI tools like WinZip, WinRAR, or the built-in archive utilities: + - Ensure options to include hidden/system files are disabled + - Look for options like "Store Mac OS X resource forks/special files" and disable them + - Some tools have specific options to exclude .DS_Store files and other metadata + +Including these unwanted files may cause validation errors during the import process. 
+ +## Maintaining your APIs + +Once a Tyk OAS API has been created in Tyk the Gateway will manage traffic to the exposed endpoints and proxy requests to the upstream service. + +Your service might evolve over time, with new features and endpoints being added and others retired. Tyk's flexible API versioning and update options provide you with choice for how to reflect this evolution in the APIs you expose to your clients. + +Your OpenAPI description is a living document that describes your upstream service. When this changes (for example, due to the addition of a new endpoint) you can use Tyk's [update API](/api-management/gateway-config-managing-oas#updating-an-api) feature to seamlessly apply the updated OpenAPI description, instantly extending the API proxy to handle traffic as your upstream evolves. + +Alternatively, and especially when you need to make breaking changes as your services and APIs evolve, you can create new [versions](/api-management/api-versioning) of your API and use configurable version identifiers to route traffic to the appropriate target. + +### Updating an API + +As developers working on services it can be necessary to regularly update the API when, for example, we add endpoints or support new methods. + +One of the most powerful features of working with Tyk OAS is that you can make changes to the OpenAPI description outside Tyk and then update your API with the updated details. You can simply update the OpenAPI part of the Tyk OAS API definition without having to make any changes to the [Tyk Vendor Extension](/api-management/gateway-config-tyk-oas#tyk-vendor-extension) (`x-tyk-api-gateway`). + +You can alternatively work on the full Tyk OAS API definition outside Tyk and update your existing API proxy with the new configuration, without having to create a [new version](/api-management/api-versioning) of the API. 
+ + + + + +If you have an updated OpenAPI description or Tyk OAS API definition, in YAML or JSON format, you can use this to modify your existing API in Tyk Dashboard with only a few clicks. + +1. Start by selecting your API from the list on the **APIs** page in the **API Management** section. + +2. Now select **Update OAS** from the **Actions** dropdown. + + Select Update OAS to import the new OpenAPI description + +3. Now you can choose the location of the file that you want to use to update your API, which can be: + + - pasted into the text editor + - uploaded using a file picker + - retrieved from a file server + + Configuring the import location and options + +4. You can re-configure the *listen path* and *upstream (target) URL* in the **Manual configuration options** section, but if you do not provide these then Tyk will leave them unchanged. + +5. Tyk must select the options to automatically configure the request processing middleware chain based upon configuration defined by the OpenAPI Specification for any new endpoints added in the update. If your OpenAPI desription contains the relevant data then select the characteristics you would like to configure. + + Configuring the listen path and upstream URL + + | Middleware | OpenAPI data used for configuration | + |------------|-------------------------------------| + | [Request validation](/api-management/traffic-transformation/request-validation#request-schema-in-openapi-specification) | Endpoints that have `requestBody` or `schema` | + | [Mock response](/api-management/traffic-transformation/mock-response#mock-responses-using-openapi-metadata) | Endpoints with `examples` or `schema` | + | [Client authentication](/api-management/client-authentication#how-does-tyk-implement-authentication-and-authorization) | Defined in `security` and `securitySchemes` | + | [Allow list](/api-management/traffic-transformation/allow-list) | Restrict access only to declared endpoint paths | + +8. 
Select **Import API** to complete the update. + + + + + +When making calls to the Tyk Dashboard API you'll need to set the domain name and port for your environment and provide credentials in the `Authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :------------------- | :------ | :---------------------- | :----------------------------- | +| Tyk Dashboard API | 3000 | `Authorization` | From Dashboard User Profile | + +You can obtain your authorization credential (Dashboard API key) from the Tyk Dashboard UI: + +- Select **Edit profile** from the dropdown that appears when you click on your username in the top right corner of the screen +- Scroll to the bottom of the page were you will see your **Tyk Dashboard API Access Credentials** + +You will also need to have β€˜admin’ or β€˜api:write’ permission if [RBAC](/api-management/user-management) is enabled. + +**Applying an Updated OpenAPI Description** + +To update just the OpenAPI description of your API in Tyk, you simply send the OpenAPI document in the payload to the `PATCH /api/apis/oas/{API-ID}` endpoint of your Tyk Gateway API. + +| Property | Description | +| :-------------- | :------------------------------------------ | +| Resource URL | `/api/apis/oas/{API-ID}` | +| Method | `PATCH` | +| Type | None | +| Body | OpenAPI document | +| Parameters | Path: `{API-ID}` | + +You need to specify which API to update - and do so using the `API-ID` value from the response you received from Tyk when creating the API. You can find this in the `x-tyk-api-gateway.info.id` field of the Tyk OAS API Definition stored in your main storage. + +**Applying an Updated Tyk OAS API Definition** + +To update the whole API in Tyk, you simply send the Tyk OAS API definition in the payload to the `PATCH /api/apis/oas/{API-ID}` endpoint of your Tyk Gateway API. 
+ +| Property | Description | +| :-------------- | :------------------------------------------ | +| Resource URL | `/api/apis/oas/{API-ID}` | +| Method | `PATCH` | +| Type | None | +| Body | Tyk OAS API Definition | +| Parameters | Path: `{API-ID}` | + +You need to specify which API to update - and do so using the `API-ID` value from the response you received from Tyk when creating the API. You can find this in the `x-tyk-api-gateway.info.id` field of the Tyk OAS API Definition stored in your main storage. + +**Check request response** + +If the command succeeds, you will see the following response, where `Meta` contains the unique identifier (`id`) for the API you have just updated: + +```.json +{ + "Status": "OK", + "Message": "API modified", + "Meta": {API-ID} +} +``` + + + + + +When making calls to the Tyk Gateway API you'll need to set the domain name and port for your environment and provide credentials in the `x-tyk-authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :----------------- | :------ | :----------------------- | :---------------------------------- | +| Tyk Gateway API | 8080 | `x-tyk-authorization` | `secret` value set in `tyk.conf` | + + +**Applying an Updated OpenAPI Description** + +To update just the OpenAPI description of your API in Tyk, you simply send the OpenAPI document in the payload to the `PATCH /tyk/apis/oas/{API-ID}` endpoint of your Tyk Gateway API. + +| Property | Description | +| :-------------- | :------------------------------------------ | +| Resource URL | `/tyk/apis/oas/{API-ID}` | +| Method | `PATCH` | +| Type | None | +| Body | OpenAPI document | +| Parameters | Path: `{API-ID}` Query: `templateId` | + +You need to specify which API to update - and do so using the `API-ID` value from the response you received from Tyk when creating the API. 
You can find this in the `x-tyk-api-gateway.info.id` field of the Tyk OAS API Definition that Tyk has stored in the `/apps` folder of your Tyk Gateway installation. + + +**Applying an Updated Tyk OAS API Definition** + +To update the whole API in Tyk, you simply send the Tyk OAS API definition in the payload to the `PATCH /tyk/apis/oas/{API-ID}` endpoint of your Tyk Gateway API. + +| Property | Description | +| :-------------- | :------------------------------------------ | +| Resource URL | `/tyk/apis/oas/{API-ID}` | +| Method | `PATCH` | +| Type | None | +| Body | Tyk OAS API Definition | +| Parameters | Path: `{API-ID}` | + +You need to specify which API to update - and do so using the `API-ID` value from the response you received from Tyk when creating the API. You can find this in the `x-tyk-api-gateway.info.id` field of the Tyk OAS API Definition that Tyk has stored in the `/apps` folder of your Tyk Gateway installation. + + +**Check request response** + +If the command succeeds, you will see the following response, where `key` contains the unique identifier (`id`) for the API you have just updated: + +```.json +{ + "key": {API-ID}, + "status": "ok", + "action": "modified" +} +``` + +**Restart or hot reload** + +Once you have updated your API you need to load it into the Gateway so that it can serve traffic. To do this you can either restart the Tyk Gateway or issue a [hot reload](/tyk-stack/tyk-gateway/important-prerequisites#hot-reload-is-critical-in-tyk-ce) command: + +```.curl +curl -H "x-tyk-authorization: {your-secret}" -s http://{your-tyk-host}:{port}/tyk/reload/group +``` + + + + + + +### Exporting an API asset + +Each API on Tyk has an API definition comprising the OpenAPI description and the Tyk Vendor Extension. We offer the facility for you to export (download) two assets for an API - just the OpenAPI description, or the full Tyk OAS API definition. 
+ +From Tyk 5.8.0, when using Tyk Dashboard these can be exported in either JSON or YAML format; for Tyk Gateway API users the assets can only be exported in JSON format. + + + + + +1. Start by selecting your API from the list on the **APIs** page in the **API Management** section. + +2. Select **Export API** from the **Actions** dropdown. + + Select Export API to download an asset from Tyk + +3. Now you can choose what you want to export, the filename (a default is offered which is based on the API Id) and the file format (JSON or YAML). + + Choosing what to download + +4. Finally select **Export** to save the file to your local machine. + + + + + +When making calls to the Tyk Dashboard API you'll need to set the domain name and port for your environment and provide credentials in the `Authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :------------------- | :------ | :---------------------- | :----------------------------- | +| Tyk Dashboard API | 3000 | `Authorization` | From Dashboard User Profile | + +You can obtain your authorization credential (Dashboard API key) from the Tyk Dashboard UI: + +- Select **Edit profile** from the dropdown that appears when you click on your username in the top right corner of the screen +- Scroll to the bottom of the page where you will see your **Tyk Dashboard API Access Credentials** + +You will also need to have 'admin' or 'api:write' permission if [RBAC](/api-management/user-management) is enabled. + +To export an API asset, you use the `GET /api/apis/oas/{API-ID}/export` endpoint, indicating whether you require the full Tyk OAS API definition or only the OpenAPI description using the `mode` parameter. 
+ +| Property | Description | +| :-------------- | :-------------------------------------------------- | +| Resource URL | `/api/apis/oas/{API-ID}/export` | +| Method | `GET` | +| Type | None | +| Parameters | Path: `{API-ID}` Query: `mode` `Content-Type` | + +Where: +- `API-ID` is the unique `id` assigned in the Tyk Vendor Extension that identifies the API +- `mode` to identify the asset to export: `public` for the OpenAPI description (default empty for full API definition) +- `Content-Type` to select the format for the exported asset: `application/x-yaml` or `application/json` + + + + +When making calls to the Tyk Gateway API you'll need to set the domain name and port for your environment and provide credentials in the `x-tyk-authorization` field for Tyk to authorize your request, as follows: + +| Interface | Port | Authorization Header | Authorization credentials | +| :----------------- | :------ | :----------------------- | :---------------------------------- | +| Tyk Gateway API | 8080 | `x-tyk-authorization` | `secret` value set in `tyk.conf` | + +To export an API asset, you use the `GET /tyk/apis/oas/{API-ID}/export` endpoint, indicating whether you require the full Tyk OAS API definition or only the OpenAPI description using the `mode` parameter. 
+ +| Property | Description | +| :-------------- | :-------------------------------------------- | +| Resource URL | `/tyk/apis/oas/{API-ID}/export` | +| Method | `GET` | +| Type | None | +| Parameters | Path: `{API-ID}` Query: `mode` | +
+Where: +- `API-ID` is the unique `id` assigned in the Tyk Vendor Extension that identifies the API +- `mode=public` to export the OpenAPI description (otherwise, export the full API definition) + + + diff --git a/api-management/gateway-config-tyk-classic.mdx b/api-management/gateway-config-tyk-classic.mdx new file mode 100644 index 000000000..15f94cfd9 --- /dev/null +++ b/api-management/gateway-config-tyk-classic.mdx @@ -0,0 +1,661 @@ +--- +title: "Tyk Classic API Definition" +description: "How to configure Tyk Classic API Definition" +keywords: "Gateway, Configuration, Tyk Classic, Tyk Classic API Definition, Tyk Classic API Definition Object" +sidebarTitle: "Tyk Classic" +--- + +import ApiDefGraphql from '/snippets/api-def-graphql.mdx'; + +## Introduction to Tyk Classic + +Tyk's legacy API definition is now called Tyk Classic and is used for GraphQL, XML/SOAP and TCP services. + +From Tyk 5.8 we recommend that any REST APIs are migrated to the newer [Tyk OAS API](/api-management/gateway-config-tyk-oas) style, in order that they can benefit from simpler configuration and future enhancements. + +For Tyk Dashboard users with an existing portfolio of Tyk Classic API definitions, we provide a [migration tool](/api-management/migrate-from-tyk-classic), available via the Dashboard API and UI. + + + +For versions of Tyk prior to 5.8 not all Gateway features can be configured using the Tyk OAS API definition, for edge cases you might need to use Tyk Classic for REST APIs, though we recommend updating to Tyk 5.8 and adopting Tyk OAS. 
+ + + +The Tyk Classic API definition has a flat structure that does not use the `omitempty` style, requiring all fields to be present even if set to null, resulting in a larger object than that for an equivalent Tyk OAS API definition. + +Note that there are some specific differences between Tyk Classic and Tyk OAS APIs, in particular with respect to [default authentication method](#configuring-authentication-for-tyk-classic-apis) and [API versioning](#tyk-classic-api-versioning). + +## Tyk Classic API versioning + +When multiple versions of a Tyk Classic API are created, the details are stored in a single API definition - unlike with Tyk OAS where a separate API definition is created for each version. The common configuration is stored in the root, whereas the details of the different versions are stored in a dedicated `version_data` object, within the API definition. + +Whilst this allows for easy management of all the API versions, it limits the number of features that can be configured differently between versions, as not all Gateway configuration options are duplicated in `version_data`. + +Tyk enforces strict access control to specific versions of APIs if these are specified in the access token (key). Once Tyk has identified the API to load and has allowed the access key through, it will check the access token's session data for access permissions. If it finds none, it will let the token through. However, if there are permissions and versions defined, it will be strict in **only** allowing access to that version. 
+ +Key things to note when configuring versioning for a Tyk Classic API: + +- you must set `version_data.not_versioned` to `false` for Tyk to treat the API as versioned +- `version_data.default_version` must contain the `name` of the version that shall be treated as default (for access control and default fallback) +- you can use `version_data.paths` to configure endpoint-level ignore, allow and block lists (which can be used to configure a mock response) +- you must use `version_data.extended_paths` to configure other endpoint-level middleware +- common versioning configuration is mostly contained within the [definition](/api-management/gateway-config-tyk-classic#common-versioning-configuration) object +- configuration for the different versions is contained within the [version_data](/api-management/gateway-config-tyk-classic#version-specific-configuration) object + - this also contains some common configuration (`not_versioned` and `default_version`) + +When you first create an API, it will not be "versioned" (i.e. `not_versioned` will be set to `true`) and there will be a single version with the name `Default` created in the `version_data` section. + +### Common versioning configuration + +**Field: `definition`** +This object in the root of the Tyk Classic API definition handles information related to how Tyk should handle requests to the versioned API + +**Field: `definition.location`** +Used to configure where the versioning identifier should be provided, one of:`header`, `url`, `url-param`. + +**Field: `definition.key`** +The name of the key that contains the versioning identifier if `definition.location` is set to `header` or `url-param`. + +**Field: `definition.strip_versioning_data`** +Set this to `true` to remove the versioning identifier when creating the upstream (target) URL. + +**Field: `definition.fallback_to_default`** +Set this to `true` to invoke the default version if an invalid version is specified in the request. 
+ +**Field: `definition.url_versioning_pattern`** +Available from Tyk 5.5.0, if you have set both `definition.strip_versioning_data` and `definition.fallback_to_default` to `true` and are using `definition.location=url` you can configure this with a regex that matches the format that you use for the versioning identifier (`versions.{version-name}.name`) + +The following fields are either deprecated or otherwise not used for Tyk Classic API versioning and should be left with their default values: + +- `definition.default`: defaults to an empty string `""` +- `definition.enabled`: defaults to `false` +- `definition.name`: defaults to an empty string `""` +- `definition.strip_path`: deprecated field; defaults to `false` +- `definition.versions`: defaults to an empty object `{}` + +### Version specific configuration + +**Field: `version_data`** +This object contains the version status and configuration for your API + +**Field: `version_data.not_versioned`** +Set this to `false` to treat this as a versioned API. If you are not using versioning for this API you must have a single `Default` entry in the `version_data.versions` map. + +**Field: `version_data.default_version`** +The `name` of the version that should be treated as the default, used for access control and as the fallback when `fallback_to_default` is enabled. + +**Field: `version_data.versions`** +A list of objects that describe the versions of the API; there must be at least one (`Default`) version defined for any API (even non-versioned APIs). Each version of your API should be defined here with a unique `name`. + +**Field: `version_data.versions.{version-name}.name`** +An identifier for this version of the API, for example `Default` or `v1`. The value given here is what Tyk will match against the value in the `definition.key`. + +**Field: `version_data.versions.{version-name}.expires`** +If a value is set then Tyk will automatically deprecate access to the API after the specified timestamp. 
The entry here takes the form of: `"YYYY-MM-DD HH:MM"`. If this is not set the version will never expire. + +**Field: `version_data.versions.{version-name}.paths`** +This object enables configuration of the basic allow list, block list and ignore authentication middleware for specific endpoints in the API version. You can also configure these and many other per-endpoint middleware using the `extended_paths` field. + +**Field: `version_data.versions.{version-name}.override_target`** +You can configure a different target URL here which will be used instead of the value stored in `proxy.target_url`, redirecting requests to a different hostname or domain. Note that this will also override (and so is not compatible with) upstream load balancing and Service Discovery, if configured for this API. + +**Field: `version_data.versions.{version-name}.global_headers`** +A `key:value` map of HTTP headers to inject into the request. + +**Field: `version_data.versions.{version-name}.global_headers_remove`** +A list of HTTP headers to remove from the request. + +**Field: `version_data.versions.{version-name}.global_size_limit`** +Apply a maximum size to the request body (payload) - in bytes. + +**Field: `version_data.versions.{version-name}.ignore_endpoint_case`** +If this boolean flag is set to `false`, Tyk will apply case sensitive matching of requests to endpoints defined in the API definition. + +**Field: `version_data.versions.{version-name}.use_extended_paths`** +Set this value to `true` if you want Tyk to apply specific middleware to endpoints in this version, configured using `version_data.versions.{version-name}.extended_paths`. + +**Field: `version_data.versions.{version-name}.extended_paths`** +This field contains a list of middleware configurations and to which paths they should be applied. 
The available middleware are: + +``` +{ + black_list[], + white_list[], + ignore[], + track_endpoints[], + do_not_track_endpoints[], + internal[], + method_transforms[], + transform[], + transform_headers[], + transform_response[], + transform_response_headers[], + size_limits[], + validate_json[], + url_rewrites[], + virtual[], + transform_jq[], + cache[], + hard_timeouts[], + circuit_breakers[] +} +``` + +Each entry must include the method and path (identifying the endpoint) where the middleware runs. You can find full documentation for each middleware in the [Traffic Transformation](/api-management/traffic-transformation) section including configuration instructions for the Tyk Classic API definition, for example the [allow list](/api-management/traffic-transformation/allow-list#api-definition-1). When using Tyk Classic, the mock response functionality is configured via the `black_list[]`, `white_list[]` or `ignore[]` middleware. + + +## Configuring authentication for Tyk Classic APIs + +Tyk Classic APIs *default to the auth token method* for authenticating requests. Flags in the API definition can be configured to enforce an alternative method: + +- keyless (no authentication of the client) +- basic authentication +- HMAC request signing +- Tyk as the OAuth 2.0 authorization server +- JWT authentication + +**Field: `use_keyless`** +This will switch off all key checking and open the API definition up, some analytics will still be recorded, but rate-limiting, quotas and security policies will not be possible (there is no session to attach requests to). This is a good setting for checking if Tyk works and is proxying traffic correctly. + +**Field: `auth`** +This object contains the basic configuration for the Auth (Bearer) Token method. + +**Field: `auth.auth_header_name`** +The header name (key) where Tyk should look for the token. + +**Field: `auth.use_param`** +Set this to true to instruct Tyk to expect the token in the URL parameter with key `auth.param_name`. 
+ +**Field: `auth.param_name`** +The name of the URL parameter key containing the auth token. Note that this is case sensitive. + +**Field: `auth.use_cookie`** +Set this to true to instruct Tyk to expect the token in the cookie with name `auth.cookie_name`. + +**Field: `auth.cookie_name`** +The name of the cookie containing the auth token. Note that this is case sensitive. + +**Field: `auth.use_certificate`** + + +**Field: `auth.validate_signature`** +Boolean value set to `true` to enable Auth Token Signature Validation + +**Field: `auth.signature`** +Configuration for Auth Token Signature Validation + +**Field: `auth.signature.algorithm`** +The algorithm you wish to validate the signature against. Options are: +- `MasherySHA256` +- `MasheryMD5` + +**Field: `auth.signature.header`** +Header key for attempted signature + +**Field: `auth.signature.secret`** +The shared secret which was used to sign the request +- this can hold a dynamic value, by referencing `$tyk_meta` or `$tyk_context` variables. +- for example: if you have stored the shared secret in the field `individual_secret` of the session token's meta-data you would use the value `"secret": "$tyk_meta.individual_secret"`. + +**Field: `auth.signature.allowed_clock_skew`** +Maximum permitted deviation in seconds between UNIX timestamp of Tyk & UNIX timestamp used to generate the signed request + +**Field: `use_basic_auth`** +This method will enable basic auth as specified by the HTTP spec, an API with this flag set will request for a username and password and require a standard base64-encoded `Authorization` header to be let through. + +**Field: `basic_auth.disable_caching`** +This disables the caching of basic authentication keys. + +**Field: `basic_auth.cache_ttl`** +This is the refresh period for the basic authentication key cache (in seconds). 
+ +**Field: `enable_signature_checking`** +If this option is set to `true`, Tyk will implement the HMAC signing standard as proposed in the [HTTP Signatures Spec](https://web-payments.org/specs/ED/http-signatures/2014-02-01/#page-3). In particular the structure of the Authorization header and the encoding method need to be taken into account. +- this method will use a session key to identify a user and a user secret that should be used by the client to sign each request's `date` header +- it will also introduce clock skew checks, requests outside of 300ms of the system time will be rejected +- it is not recommended for Single-Page-Webapps (SPA) or Mobile apps due to the fact that secrets need to be distributed + +**Field: `hmac_allowed_algorithms`** +Tyk supports the following HMAC algorithms: β€œhmac-sha1", "hmac-sha256", "hmac-sha384", "hmac-sha512”. You can limit which ones you want to support with this option. For example, [β€œhmac-sha256”] + +**Field: `hmac_allowed_clock_skew`** +Set this value to anything larger than `0` to set the number of milliseconds that will be tolerated for clock skew. Set to `0` to prevent clock skew checks on requests (only in HMAC mode, i.e. when `enable_signature_checking` is set to `true`). + +**Field: `use_oauth2`** +This authentication method will use Tyk as the OAuth 2.0 Authorization Server. Enabling this option will cause Tyk to add OAuth2-standard endpoints to the API for `/authorize` and `/token`, these will supersede any other requests to your proxied system in order to enable the flow. + +**Field: `oauth_meta.allowed_access_types`** +This is a string array of OAuth access options depending on the OAuth grant types to be supported. Valid options are: +- `authorization_code` - client has an authorization code to request a new access token. +- `refresh_token` - client can use a refresh token to refresh expired bearer access token. 
+ +**Field: `oauth_meta.allowed_authorize_types`** +This is a string array of OAuth authorization types. Valid options are: +- `code` - Client can request an authorization code which can be used to request an access code via a server request (traditionally reserved for server-side apps). +- `token` - Client can request an access token directly, this will not enable refresh tokens and all tokens have a 12 hour validity. Recommended for mobile apps and single-page webapps. + +**Field: `oauth_meta.auth_login_redirect`** +The Tyk OAuth flow has a dummy (intercept) `/authorize` endpoint which basically redirects the user to your login and authentication page, it will also send along all OAuth data as part of the request (so as to mimic a regular app flow). This is the URL that the user will be sent to (via `POST`). + +**Field: `notifications`** +When Tyk is used as the OAuth 2.0 Authorization Server, because it will handle access requests on your behalf once authorization codes have been issued, it will need to notify your system that these have occurred. It will `POST` key data to the URL set in these options to ensure that your system is synchronised with Tyk. + +**Field: `notifications.shared_secret`** +Posted data to your service will use this shared secret as an authorization header. This is to ensure that messages being received are from Tyk and not from another system. + +**Field: `notifications.oauth_on_keychange_url`** +The URL that will be sent the updated information - the URL will be polled up to 3 times if there is a communications failure. On a `200 OK` response it stops. + +**Field: `auth_configs`** +This section allows definition of multiple chained authentication mechanisms that will be applied to requests to the API, with distinct authentication headers identified for the different auth modes. 
+ +For example: + +```json +{ + "auth_configs": { + "authToken": { "auth_header_name": "My-Auth-Header-Key" }, + "basic": { "auth_header_name": "My-Basic-Auth-Header-Key" } + } +} +``` + +**Field: `base_identity_provided_by`** +This enables multiple authentication and indicates which authentication method provides the session object that determines access control, rate limits and usage quotas. + +It should be set to one of the following: + +- `auth_token` +- `hmac_key` +- `basic_auth_user` +- `jwt_claim` +- `oidc_user` +- `oauth_key` +- `custom_auth` + +**Field: `enable_jwt`** +Set JWT as the authentication method for this API. + +**Field: `jwt_signing_method`** +Either HMAC or RSA - HMAC requires a shared secret while RSA requires a public key to use to verify against. Please see the section on JSON web tokens for more details on how to generate these. + +**Field: `jwt_source`** +Must be a base64 encoded valid RSA, ECDSA or HMAC key or the full address of a JSON Web Key Set (JWKS) endpoint. This key (or the JWKS retrieved from the endpoint) will be used to validate inbound JWT and throttle them according to the centralised JWT options and fields set in the configuration. See [JWT signature validation](/basic-config-and-security/security/authentication-authorization/json-web-tokens#remotely-stored-keys-jwks-endpoint) for more details on using a JWKS endpoint. + +**Field: `jwt_identity_base_field`** +Identifies the user or identity to be used in the Claims of the JWT. This will fallback to `sub` if not found. This field forms the basis of a new "virtual" token that gets used after validation. It means policy attributes are carried forward through Tyk for attribution purposes. + +Centralised JWTs add a `TykJWTSessionID` to the session metadata on create to enable upstream hosts to work with the internalised token should things need changing. + +**Field: `jwt_policy_field_name`** +The policy ID to apply to the virtual token generated for a JWT. 
+ +**Field: `jwt_issued_at_validation_skew`** +Prevent token rejection due to clock skew between servers for Issued At claim (seconds, default: 0) + +**Field: `jwt_expires_at_validation_skew`** +Prevent token rejection due to clock skew between servers for Expires At claim (seconds, default: 0) + +**Field: `jwt_not_before_validation_skew`** +Prevent token rejection due to clock skew between servers for Not Before claim (seconds, default: 0) + +## GraphQL specific fields + + + +## General features + +### API identification + +**Field: `api_id`** +The identifier for the API. This should be unique, but can actually be any kind of string. For single-instance setups this can probably be set to `1`. It is recommended to make this a UUID. The `api_id` is used to identify the API in queries to the Tyk Gateway API or Tyk Dashboard API. + +**Field: `name`** +Human readable name of the API. It is used for identification purposes but does not act as an index. + +**Field: `org_id`** +This is an identifier that can be set to indicate ownership of an API key or of an individual API. If the Org ID is set (recommended), it is prepended to any keys generated by Tyk - this enables lookups by prefixes from Redis of keys that are in the system. + +**Field: `domain`** +The domain to bind this API to. Multiple APIs can share the same domain, so long as their listen paths are unique. +This domain will affect your API only. To set up the portal domain for your organization, please register it in the main Tyk Dashboard settings file. +Your Tyk Gateway can listen on multiple domains/subdomains through the use of regular expressions, more precisely the RE2 Syntax. They are defined using the format `{name}` or `{name:pattern}`. 
+ * `www.example.com` Matches only if domain is www.example.com + * `{subdomain:[a-z]+}.example.com` Matches dynamic subdomain + * `{subdomain:foo|bar}.example.com` will listen on foo.example.com and bar.example.com + +**Field: `ignore_endpoint_case`** +If set to `true` when matching the URL path for requests to this API, the case of the endpoint path will be ignored. So for an API `my-api` and the endpoint `getuser`, requests to all of the following will be matched: + + * `/my-api/getuser` + * `/my-api/getUser` + * `/my-api/GetUser` + +If set to true, this will override the endpoint level settings in [Ignore](/api-management/traffic-transformation/ignore-authentication#case-sensitivity), [Allowlist](/api-management/traffic-transformation/allow-list#case-sensitivity) and [Blocklist](/api-management/traffic-transformation/block-list#case-sensitivity) middleware. This setting can be overridden at the Tyk Gateway level, and so applied to all APIs, by setting `ignore_endpoint_case` to `true` in your `tyk.conf` file. See [ignore_endpoint_case](/tyk-oss-gateway/configuration#ignore_endpoint_case) for details. + +**Field: `enable_batch_request_support`** +Set to true to enable batch support + +**Field: `id`** +This is allocated by Tyk to locate the API definition in the Dashboard main storage and bears no actual relation to the identity of the API. + +**Field: `active`** +This field is used by Tyk Dashboard to control whether the API will serve traffic. If set to `false` then on Gateway start, restart or reload, the API will be ignored and all paths and routes for that API will cease to be proxied. Any keys assigned to it will still exist, though they will not be let through for that particular API. + +**Field: `internal`** +This field controls the exposure of the API on the Gateway. 
When set to `true`, the API will not be made available for external access and will not be included in API listings returned by the Gateway's management APIs; it will be accessible only via [internal looping](/advanced-configuration/transform-traffic/looping). + +### Access token management + +**Field: `session_lifetime`** +The session (API access key/token) lifetime will override the expiry date if it has been set on a key (in seconds). For example, if a key has been created that never expires, then it will remain in the session cache forever unless manually deleted. If a re-auth needs to be forced or a default expiry needs to be applied to all keys, then use this feature to set the session expiry for an entire API. + +**Field: `session_lifetime_respects_key_expiration`** +If this is set to `true` and the key expiration date is less than the `session_lifetime`, the key expiration value will be set to `session_lifetime`. Don't forget that the key expiration is set in unix timestamp but `session_lifetime` is set in seconds. Also, `session_lifetime_respects_key_expiration` exists in the global config too. When the global one is set to `true`, the one set at the API level will be ignored. + +**Field: `dont_set_quota_on_create`** +If set to true, when the keys are created, edited or added for this API, the quota cache in Redis will not be reset. + +### Traffic logs + +**Field: `enable_detailed_recording`** +If this value is set to `true`, the Gateway will record the request and response payloads in traffic logs. + +**Field: `do_not_track`** +If this value is set to `true`, the Gateway will not generate traffic logs for requests to the API. + +**Field: `tag_headers`** +This specifies a string array of HTTP header values which are turned into tags. For example, if you include the `X-Request-ID` header in `tag_headers`, then for each incoming request Tyk will add an `x-request-id-<value>` tag to the request's analytics record. 
This functionality can be useful if you need analytics for request headers without the body content (Enabling detailed logging is another option, but it records the full request and response objects and consumes a lot more space). + +**Field: `expire_analytics_after`** +This value (in seconds) will be used to indicate a TTL (ExpireAt) for the retention of analytics created from traffic logs generated for this API that are stored in MongoDB. If using an alternative analytics storage solution that does not respect ExpireAt then you must manage the record TTL separately. + +### OpenTelemetry + +**Field: `detailed_tracing`** +If this value is set to `true`, the Gateway will generate detailed OpenTelemetry spans for requests to the API. + +### API Level Rate Limits + +**Field: `global_rate_limit`** +The [API-level rate limit](/api-management/rate-limit#rate-limiting-layers) aggregates the traffic coming into an API from all sources and ensures that the overall rate limit is not exceeded. It is composed of a `rate` (number of requests) and `per` (interval). If either is set to `0` then no API-level limit is applied. + +**Field: `disable_rate_limit`** +If set to `true`, all rate limits are disabled for the specified API (both API-level and key-level) + +### Event handlers + +**Field: `event_handlers`** +This adds the ability to configure an API with event handlers to perform specific actions when an event occurs. + +**Field: `events`** +Each event handler that is added to the event_handlers.events section, is mapped by the event type, and then a list of each handler configuration, defined by the handler name and the handler metadata (usually some kind of configurable options for the specific handler) + +### Custom data + +**Field: `enable_context_vars`** +Context variables are extracted from the request at the start of the middleware chain, and must be explicitly enabled in order for them to be made available to your transforms. 
These values can be very useful for later transformation of request data, for example, in converting a Form-based POST into a JSON-based PUT or to capture an IP address as a header. + +**Field: `config_data`** +You can use this field to pass custom attributes to the virtual endpoint middleware. It is a list of key:value pairs. + +### IP Access Control + +**Field: `enable_ip_whitelisting`** +This works with the associated `allowed_ips` list and, when set to `true`, accepts only requests coming from the defined list of allowed IP addresses. + +**Field: `allowed_ips`** +A list of strings that defines the IP addresses (in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)) that are allowed access via Tyk. This list is explicit and wildcards are not supported. + +**Field: `enable_ip_blacklisting`** +This works with the associated `blacklisted_ips` list and, when set to `true`, rejects any requests coming from the defined list of blocked IP addresses. + +**Field: `blacklisted_ips`** +A list of strings that defines the IP addresses (in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)) that are blocked access via Tyk. This list is explicit and wildcards are not supported. + +### Cross-Origin Resource Sharing (CORS) + +**Field: `CORS.enable`** +Enable CORS for the API + +**Field: `CORS.allowed_origin`** +A list of origin domains to allow access from. Wildcards are also supported, e.g. http://*.foo.com + +**Field: `CORS.allowed_methods`** +A list of HTTP methods to allow access via. + +**Field: `CORS.allowed_headers`** +Headers that are allowed within a request. + +**Field: `CORS.exposed_headers`** +Headers that are exposed back in the response. + +**Field: `CORS.allow_credentials`** +Whether credentials (cookies) should be allowed. + +**Field: `CORS.max_age`** +Maximum age of credentials. 
+ +**Field: `CORS.options_passthrough`** +Allow CORS OPTIONS preflight request to be proxied directly to upstream, without authentication and the rest of the checks. This means that pre-flight requests generated by web-clients such as SwaggerUI will be able to test the API using trial keys. If your service handles CORS natively, then enable this option. + +### Proxy Transport Settings + +**Field: `proxy.preserve_host_header`** +Set to `true` to preserve the host header. If `proxy.preserve_host_header` is set to `true` in an API definition then the host header in the outbound request is retained to be the inbound hostname of the proxy. + +**Field: `proxy.listen_path`** +The path to listen on, e.g. `/api` or `/`. Any requests coming into the host, on the port that Tyk is configured to run on, that go to this path will have the rules defined in the API Definition applied. Versioning assumes that different versions of an API will live on the same URL structure. If you are using URL-based versioning (e.g. `/v1/function`, `/v2/function/`) then it is recommended to set up a separate non-versioned definition for each version as they are essentially separate APIs. + +Proxied requests are literal, no re-writing takes place, for example, if a request is sent to the listen path of: `/listen-path/widgets/new` and the URL to proxy to is `http://your.api.com/api/` then the *actual* request that will land at your service will be: `http://your.api.com/api/listen-path/widgets/new`. + +This behavior can be circumvented so that the `listen_path` is stripped from the outgoing request. See the section on `strip_listen_path` below. + +**Field: `proxy.strip_listen_path`** +By setting this to `true`, Tyk will attempt to replace the `listen-path` in the outgoing request with an empty string. 
This means that, in the above scenario where a request to `/listen-path/widgets/new` proxied to `http://your.api.com/api/` would become `http://your.api.com/api/listen-path/widgets/new`, the `listen_path` is stripped and the outgoing request instead becomes: `http://your.api.com/api/widgets/new`. + +**Field: `proxy.target_url`** +This defines the target URL that the request should be proxied to if it passes all checks in Tyk. + +**Field: `proxy.disable_strip_slash`** +This boolean option allows you to add a way to disable the stripping of the slash suffix from a URL. + +**Field: `proxy.enable_load_balancing`** +Set this value to `true` to have a Tyk node distribute traffic across a list of servers. **Required: ** You must fill in the `target_list` section. + +**Field: `proxy.target_list`** +A list of upstream targets for load balancing (can be one or many hosts). + +**Field: `proxy.check_host_against_uptime_tests`** +If uptime tests are enabled, Tyk will check the hostname of the outbound request against the downtime list generated by the host checker. If the host is found, then it is skipped. + +**Field: `proxy.service_discovery`** +The service discovery section tells Tyk where to find information about the host to proxy to. In a clustered environment this is useful if servers are coming online and offline dynamically with new IP addresses. The service discovery module can pull out the required host data from any service discovery tool that exposes a RESTful endpoint that outputs a JSON object. + +```json +{ + "enable_load_balancing": true, + "service_discovery": { + "use_discovery_service": true, + "query_endpoint": "http://127.0.0.1:4001/v2/keys/services/multiobj", + "use_nested_query": true, + "parent_data_path": "node.value", + "data_path": "array.hostname", + "port_data_path": "array.port", + "use_target_list": true, + "cache_timeout": 10 + } +} +``` + +**Field: `proxy.service_discovery.use_discovery_service`** +Set this to `true` to enable the discovery module. 
+ +**Field: `proxy.service_discovery.query_endpoint`** +The endpoint to call. + +**Field: `proxy.service_discovery.data_path`** +The namespace of the data path. For example, if your service responds with: + +```json +{ + "action": "get", + "node": { + "key": "/services/single", + "value": "http://httpbin.org:6000", + "modifiedIndex": 6, + "createdIndex": 6 + } +} +``` + +Then your name space would be `node.value`. + +**Field: `proxy.service_discovery.use_nested_query`** +Sometimes the data you are retrieving is nested in another JSON object. For example, this is how Etcd responds with a JSON object as a value key: + +```json +{ + "action": "get", + "node": { + "key": "/services/single", + "value": "{\"hostname\": \"http://httpbin.org\", \"port\": \"80\"}", + "modifiedIndex": 6, + "createdIndex": 6 + } +} +``` + +In this case, the data actually lives within this string-encoded JSON object. So in this case, you set the `use_nested_query` to `true`, and use a combination of the `data_path` and `parent_data_path` (below) + +**Field: `proxy.service_discovery.parent_data_path`** +This is the namespace of where to find the nested value. In the above example, it would be `node.value`. You would then change the `data_path` setting to be `hostname`. Tyk will decode the JSON string and then apply the `data_path` namespace to that object in order to find the value. + +**Field: `proxy.service_discovery.port_data_path`** +In the above nested example, we can see that there is a separate PORT value for the service in the nested JSON. In this case you can set the `port_data_path` value and Tyk will treat `data_path` as the hostname and zip them together (this assumes that the hostname element does not end in a slash or resource identifier such as `/widgets/`). In the example, the `port_data_path` would be `port`. + +**Field: `proxy.service_discovery.target_path`** +The target path to append to the host:port combination provided by the service discovery engine. 
+ +**Field: `proxy.service_discovery.use_target_list`** +If you are using load_balancing, set this value to `true` and Tyk will treat the data path as a list and inject it into the target list of your API definition. + +**Field: `proxy.service_discovery.cache_timeout`** +Tyk caches target data from a discovery service. In order to make this dynamic you can set a cache value when the data expires and new data is loaded. + +**Field: `proxy.transport`** +The transport section allows you to specify a custom proxy and set the minimum TLS versions and any SSL ciphers. + +This is an example of `proxy.transport` definition followed by explanations for every field. +```json +{ + "transport": { + "proxy_url": "http(s)://proxy.url:1234", + "ssl_min_version": 771, + "ssl_ciphers": [ + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" + ], + "ssl_insecure_skip_verify": true, + "ssl_force_common_name_check": false + } +} +``` + +**Field: `proxy.transport.proxy_url`** +Use this setting to specify your custom forward proxy and port. + +**Field: `proxy.transport.ssl_min_version`** +Use this setting to specify your minimum TLS version; note that this is limited by the version of Tyk due to underlying Golang support for legacy TLS versions. + +**Field: `proxy.transport.ssl_ciphers`** +You can add `ssl_ciphers` which takes an array of strings as its value. Each string must be one of the allowed cipher suites as defined at https://golang.org/pkg/crypto/tls/#pkg-constants. This is not applicable from TLS 1.3. + +**Field: `proxy.transport.ssl_insecure_skip_verify`** +Boolean flag to control at the API definition whether it is possible to use self-signed certs for some APIs, and actual certs for others. This also works for `TykMakeHttpRequest` & `TykMakeBatchRequest` in virtual endpoints. + +**Field: `proxy.transport.ssl_force_common_name_check`** +Use this setting to force the validation of a hostname against the certificate Common Name. 
+ +### Upstream Authentication + +**Field: `strip_auth_data`** +When set to `true`, auth related headers will be stripped from requests proxied through the gateway. + +**Field: `request_signing`** +Configuration for Upstream Request Signing using HMAC or RSA algorithms. + +**Field: `request_signing.secret`** +The secret used for signing (not shared with the upstream). + +**Field: `request_signing.key_id`** +An identifier allocated by the upstream used to identify Tyk as the requesting client. + +**Field: `request_signing.algorithm`** +The signing algorithm to be used - one from `hmac-sha1`, `hmac-sha256`, `hmac-sha384`, `hmac-sha512`, `hmac-rsa256` + +**Field: `request_signing.header_list`** +A list of headers to be included in the signature calculation. + +**Field: `request_signing.certificate_id`** +The certificate ID used in the RSA signing operation. + +**Field: `request_signing.signature_header`** +The HTTP header to be used to pass the signature to the upstream. + +### Uptime Tests + +**Field: `uptime_tests`** +This section defines the uptime tests to run for this API. + +**Field: `uptime_tests.check_list`** +A list of tests to run, which can be either short form: + +```json +{ + "uptime_tests": { + "check_list": [ + { + "url": "http://google.com/" + } + ] + } +} +``` + +or long form: + +```json +{ + "uptime_tests": { + "check_list": [ + { + "url": "http://posttestserver.com/post.php?dir=uptime-checker", + "method": "POST", + "headers": { + "this": "that", + "more": "beans" + }, + "body": "VEhJUyBJUyBBIEJPRFkgT0JKRUNUIFRFWFQNCg0KTW9yZSBzdHVmZiBoZXJl", + "timeout": 1000 + } + ] + } +} +``` + +**Field: `uptime_tests.check_list.url`** +The URL to be used for the uptime test. + +**Field: `uptime_tests.check_list.method`** +The HTTP method to be used for the request to the `check_list.url` (required for long form tests). 
+ +**Field: `uptime_tests.check_list.headers`** +A list of headers to be applied to the request to the `check_list.url` as key:value pairs (only for long form tests). + +**Field: `uptime_tests.check_list.body`** +The body of the request to be sent to the `check_list.url`, this is Base64 encoded (only for long form tests). + +**Field: `uptime_tests.check_list.timeout`** +The timeout in milliseconds for the uptime check (only for long form tests). + diff --git a/api-management/gateway-config-tyk-oas.mdx b/api-management/gateway-config-tyk-oas.mdx new file mode 100644 index 000000000..568f32459 --- /dev/null +++ b/api-management/gateway-config-tyk-oas.mdx @@ -0,0 +1,64 @@ +--- +title: "Tyk OAS" +description: "How to configure Tyk OAS API Definition" +keywords: "Gateway, Configuration, Tyk OAS, Tyk OAS API Definition, Tyk OAS API Definition Object" +sidebarTitle: "Tyk OAS" +--- + +import XTykGateway from '/snippets/x-tyk-gateway.mdx'; + +## Introduction to Tyk OAS + +The upstream service receives requests from Tyk to the *upstream API* after processing based on the configuration applied in the Tyk API definition. Crucially the upstream service remains unaware of Tyk Gateway's processing, responding to incoming requests as it would for direct client-to-service communication. The *API proxy* deployed on Tyk is typically designed to have the same API endpoints, resources and methods that are defined for the upstream service's API. The *upstream API* will often be described according to the industry standard OpenAPI Specification - and this is where Tyk OAS comes in. + +### What is the OpenAPI Specification? + +The *OpenAPI Specification* (OAS) is a standardized framework for describing RESTful APIs in a machine-readable format (typically JSON or YAML). It defines how APIs should be documented, including details about endpoints, request/response formats, authentication, and error codes. 
In short, OAS is a blueprint for your API—detailing how the API behaves and how users or services can interact with it. The *OpenAPI Description* (OAD) is the actual content that adheres to this specification, essentially an object that describes the specific functionality of an API. The *OpenAPI Document* refers to a file that contains an OpenAPI description, following the OAS format. + +OpenAPI has become the de facto standard for API documentation because of its consistency, ease of use, and broad tooling support. It allows both developers and machines to interact with APIs more effectively, offering benefits like auto-generated client SDKs, server stubs, and up-to-date documentation. Tools such as Tyk also support validation, testing, and mock servers, which speeds up development and ensures consistency across API implementations. + +Tyk supports [OpenAPI Specification v3.0.x](https://spec.openapis.org/oas/v3.0.3). + +### What is a Tyk OAS API definition? + +Not every feature of an advanced API management platform such as Tyk is covered by the OpenAPI Specification. The *API definition* must provide Tyk with everything it needs to receive and process requests on behalf of the upstream service - so the OpenAPI description of the upstream API is not enough on its own to configure the Gateway. This is where the *Tyk Vendor Extension* comes in, allowing you to configure all the powerful features of Tyk Gateway that are not covered by OAS. + +The [Tyk Vendor Extension](#tyk-vendor-extension) follows the same architectural style as the OpenAPI Specification and is encapsulated in a single object that is appended to the OpenAPI description, creating a *Tyk OAS API definition*. + +#### OpenAPI description + +There are many great explanations of the features and capabilities of the OpenAPI Specification so we won't repeat it all here. A good place to start learning is from the maintainers of the specification: the [OpenAPI Initiative](https://learn.openapis.org/). 
The OpenAPI Specification defines the minimal set of elements that must be present in a valid OpenAPI description. + +Tyk treats the OpenAPI description as the source of truth for the data stored within it. This means that Tyk does not duplicate those data in the Tyk Vendor Extension but rather builds upon the basic configuration defined in the OAD. + +#### Tyk Vendor Extension + +The Tyk Vendor Extension is a JSON object (`x-tyk-api-gateway`) within the Tyk OAS API definition that encapsulates all of the Gateway configuration that is not contained within the OpenAPI description. + +It is structured in four sections: + +- `info` containing metadata used by Tyk to manage the API proxy, including name, identifiers, status, and version +- `server` contains configuration for the client-gateway integration, including listen path and authentication method +- `middleware` contains configuration for the gateway's middleware chain, split into API-level and endpoint-level settings +- `upstream` contains configuration for the gateway-upstream integration, including targets, load balancing and rate limits + +The extension has been designed, as has OAS, to have minimal content so if a feature is not required for your API (for example, payload transformation) then this can be omitted from the API definition. Most features have an `enabled` flag which must be set for Tyk to apply that configuration. This can be used to include settings in the API definition and enable them only when required (useful during API development, testing and debug). + +In the OpenAPI Specification *paths* define the API endpoints, while *operations* specify the HTTP methods (GET, POST, PUT, DELETE) and actions for each endpoint. They describe how the API handles requests, including parameters, request bodies, responses, and status codes, providing a clear structure for API interactions. 
Tyk interprets this information directly from the OpenAPI description and uses the `operationId` field to link the endpoint level middleware configuration within the Tyk Vendor Extension to the appropriate endpoint. + +### Modifying the OpenAPI description + +Tyk will only make additions or modifications to the OAD when the user makes certain changes in the Tyk API Designer and as follows: + +- The URL on Tyk Gateway to which client requests should be sent will be added at the first location in the `servers` list +- The OpenAPI Specification declares `paths` which describe the available endpoints (paths) and the operations that can be performed on them (such as `GET`, `POST`, `PUT`, `DELETE`). Tyk will modify this list if changes are made using the Tyk API Designer, for example if an endpoint is added. + +Where Tyk might modify the OpenAPI description, this is noted in the appropriate section of the documentation. + +If changes are made via the Tyk API Designer that impact the OpenAPI description, we recommend that you export the OAD from Tyk to store in your source of truth repository. This ensures that your records outside Tyk accurately reflect the API that is consumed by your clients (for example, if you publish documentation from the OpenAPI Specification of your API). + +Equally, if you make changes to your OpenAPI description outside Tyk, we provide a simple method to update (or patch) your Tyk API definition with the updated OAD. Alternatively you might prefer to create a new version of your API for the updated OpenAPI description, depending on the current stage of the API in its lifecycle. 
+ + + + diff --git a/api-management/gateway-events.mdx b/api-management/gateway-events.mdx new file mode 100644 index 000000000..136864ad8 --- /dev/null +++ b/api-management/gateway-events.mdx @@ -0,0 +1,957 @@ +--- +title: "Gateway Events" +description: "Introduction to Gateway Events" +keywords: "Gateway, Events, Async APIs, Asynchronous APIs, Error Templates, Event Types, Event Webhooks, Event Metadata" +sidebarTitle: "Gateway Events" +--- + +Tyk Gateway will generate asynchronous events when certain conditions are met, for example a rate limit being exceeded, an expired key attempting to access an API, or a circuit breaker triggering due to a slow or unresponsive upstream. + +Tyk has a flexible model for handling these API events. + +## Event categories + +There are four different categories of events that can be fired by Tyk: +- [API events](#api-events) +- [Token lifecycle events](#token-lifecycle-events) +- [Advanced quota usage events](#advanced-quota-usage-events) +- [Custom events](#custom-events) + +### API events + +Tyk can generate (or *fire*) a variety of built-in API events due to activity triggered by an API request, such as exceeded rate limits, depleted quotas or attempts to access using expired keys. The full list of standard API events is available [here](/api-management/gateway-events#api-events). + +### Token lifecycle events + +Alongside the events that are fired in response to API requests, Tyk will also mark the creation, update or deletion of access tokens (keys) with dedicated events as indicated [here](/api-management/gateway-events#token-lifecycle-events). + +### Advanced quota usage events + +Tyk will generate [standard quota events](/api-management/gateway-events#standard-quota-events) when a client quota has been consumed, but what if you want to have more granular notification of quota usage as your clients are approaching their quota limit? 
+ +For this, Tyk provides [advanced quota monitoring](/api-management/gateway-events#monitoring-quota-consumption) that can be configured to trigger a dedicated event handler when the API usage exceeds different thresholds approaching the quota limit. + +### Custom events + +The event subsystem has been designed to be easily extensible, so the community can define additional events within the Tyk codebase which can then be handled using the existing event handling system. + +## Handling events with Tyk + +Tyk has a simple event handling system where *event handlers* are assigned (or registered) to the different [events](/api-management/gateway-events#event-types) that Tyk can generate. These handlers are assigned per-API so when an event is generated for an API and there is an *event handler* registered for that *event*, the handler will be triggered. + +Three different categories of *event handler* can be registered for each event: +- a [webhook](/api-management/gateway-events#event-handling-with-webhooks) that will call out to an external endpoint +- an [event log](/api-management/gateway-events#logging-api-events-1) that will write to the configured [log output](/api-management/logs-metrics#system-logs) +- your own [custom event handler](/api-management/gateway-events#custom-api-event-handlers) that will run in a JavaScript virtual machine on the Tyk server + + + + + Remember that quota usage monitoring has a [dedicated mechanism](/api-management/gateway-events#monitoring-quota-consumption) for handling these special events. 
+ + + +## Event Types + +The built-in events that Tyk Gateway will generate are: + +### Rate limit events + +- `RatelimitExceeded`: the rate limit has been exceeded for a specific key +- `OrgRateLimitExceeded`: the rate limit has been exceeded for a specific organization +- `RateLimitSmoothingUp`: the [intermediate rate limit allowance](/api-management/rate-limit#rate-limit-smoothing) has been increased for a specific key +- `RateLimitSmoothingDown`: the [intermediate rate limit allowance](/api-management/rate-limit#rate-limit-smoothing) has been decreased for a specific key + +### Standard quota events + +- `QuotaExceeded`: the quota for a specific key has been exceeded +- `OrgQuotaExceeded`: the quota for a specific organization has been exceeded + +### Authentication failure events + +- `AuthFailure`: a key has failed authentication or has attempted access and was denied +- `KeyExpired`: an attempt has been made to access an API using an expired key +- `UpstreamOAuthError`: an error occurred when trying to authenticate with an upstream using an OAuth provider + +### API version events + +- `VersionFailure`: a key has attempted access to a version of an API that it does not have permission to access + +### Circuit breaker events + +- `BreakerTripped`: a circuit breaker on a path has tripped and been taken offline +- `BreakerReset`: a circuit breaker has reset and the path is available again +- `BreakerTriggered`: a circuit breaker has changed state, this is generated when either a `BreakerTripped`, or a `BreakerReset` event occurs; a status code in the metadata passed to the webhook will indicate which of these events was triggered + +### Uptime events + +- `HostDown`: the uptime checker has found that a host is down/not available +- `HostUp`: the uptime checker has found that a host is available again after being offline + +### Token lifecycle events + +- `TokenCreated`: a token has been created +- `TokenUpdated`: a token has been changed/updated +- 
`TokenDeleted`: a token has been deleted + +### Certificate expiry events + +- `CertificateExpiringSoon`: a certificate has been used within the expiry threshold and should be updated +- `CertificateExpired`: an expired certificate has been used in a request + +## Event Metadata + +When an event is fired, and an *event handler* is registered for that specific API and event combination, Tyk Gateway provides the handler with a rich set of [metadata](/api-management/gateway-events#event-metadata). The external system (webhook) or custom (JavaScript) code can then use this metadata to decide what action to take. + +Most events provide common metadata as follows: + +- `message` (string): a human-readable message from Tyk Gateway that provides details about the event +- `path` (string): the path of the API endpoint request that led to the event being fired +- `origin` (string): origin data for the source of the request (if this exists) +- `key` (string): the key that was used in the request +- `originating_request` (string): a Base64-encoded [raw inbound request](#raw-request-data) + +### Specific Event Metadata + +Some events provide alternative metadata specific to that event. The following sections detail the event-specific metadata provided for such events. + +
    + +
  • + +- `message` (string): a human readable message from Tyk Gateway that adds detail about the event +- `cert_id` (string): the certificate ID +- `cert_name` (string): the name of the certificate +- `expires_at` (string, RFC3339): the certificate expiry date +- `days_remaining` (integer): the remaining days until the certificate expires +- `api_id`(string): the ID of the API that triggered the event + +
  • + +
  • + +- `message` (string): a human readable message from Tyk Gateway that adds detail about the event +- `cert_id` (string): the certificate ID +- `cert_name` (string): the name of the certificate +- `expired_at` (string, RFC3339): the date when the certificate expired +- `days_since_expiry` (integer): the number of days since the certificate expired +- `api_id`(string): the ID of the API that triggered the event + +
  • + +
+ +### Using the metadata + +The metadata are exposed so that they can be used by the event handler (webhook or custom) using Go templating. For details of how each type of event handler can access these data, please see the appropriate section for [webhook](/api-management/gateway-events#webhook-payload) or [custom](/api-management/gateway-events#the-event-object) event handlers. + +### Raw Request Data + +The `OriginatingRequest` metadata is a Base64-encoded wire-protocol representation of the original request to the event handler. If you are running a service bus or queue that stores failed, throttled or other types of requests, you can decode this object and parse it in order to re-create the original intent of the request (e.g. for post-processing). + +### Logging API Events + +Tyk's built-in logging event handler is designed primarily for debugging purposes and will store details of an API event to the configured logger output. + +The Tyk platform can be configured to log at various verbosity levels (info, debug, warn, error) and can be integrated with third-party log aggregation tools like Sentry, Logstash, Graylog, and Syslog. For full details on configuring the Tyk logger, see [this section](/api-management/logs-metrics#system-logs). + +
+ + +Logging event handlers are currently only supported by Tyk Classic APIs. + + + +### Configuring the event handler + +Registering a logging event handler to your Tyk Classic API is the same as adding any other event handler, within the `event_handlers` section of the API definition. + +The `handler_name` for the logging event handler should be set to: `eh_log_handler`. + +The `handler_meta` for the logging event handler contains a single field: +- `prefix` is a label that will be prepended to each log entry + +For example, to register event handlers to log the `AuthFailure` and `KeyExpired` events you might add the following to your API definition: + +```json +{ + "event_handlers": { + "events": { + "AuthFailure": [ + { + "handler_name": "eh_log_handler", + "handler_meta": { + "prefix": "AuthFailureEvent" + } + } + ], + "KeyExpired": [ + { + "handler_name": "eh_log_handler", + "handler_meta": { + "prefix": "KeyExpiredEvent" + } + } + ] + } + } +} +``` + +In this example +- the `AuthFailure` event will trigger the event handler to generate a log with the prefix `AuthFailureEvent` +- the `KeyExpired` event will trigger the event handler to generate a log with the prefix `KeyExpiredEvent` + +When the event handler is triggered an entry will be made in the log containing the corresponding prefix, which can be useful for monitoring and debugging purposes. + +## Event handling with webhooks + +### Overview + +A webhook is a mechanism for real-time, event-driven communication between different systems or applications over the internet. It is an HTTP callback, typically an HTTP POST request that occurs when something happens. Webhooks are real-time, automated and lightweight. Notifications are sent immediately when events occur without the need for the receiving service to poll. + +In the context of Tyk Gateway, webhooks are event handlers that can be registered against API Events. 
The webhook will be triggered when the corresponding event is fired and will send a customizable fixed payload to any open endpoint. + +#### When to use webhook event handlers + +There are many occasions when you might use webhooks for event handling, here are just a few examples. + +##### Rate limit violations + +When an API consumer exceeds their allocated rate limit, the `RatelimitExceeded` event will be fired. A webhook event handler can be employed to notify an upstream system to take actions such as updating a dashboard, notifying the account manager, or adjusting the client's service tier. + +##### API key lifecycle events + +When an expired API key is used to access an API, the client will receive an error and the `KeyExpired` event will be fired. A webhook event handler can be employed to notify an upstream system to take actions such as renewing the key, logging the failure in a CRM or notifying the account manager to initiate customer communication. + +##### Upstream service problems + +When an API circuit breaker triggers due to an unresponsive upstream service, the `BreakerTripped` event will be fired. A webhook event handler can be employed to update monitoring dashboards or to trigger automated recovery scripts or processes. + +#### How webhook event handlers work + +With Tyk Gateway, the webhook event handler is a process that runs asynchronously in response to an API event being fired. It will issue an HTTP request to any open endpoint and is fully configurable within the API definition. + +The HTTP method, body, header values, and target URL can all be configured in the API definition. The [request body](#webhook-payload) is generated using a Tyk template file that has access to the [event metadata](/api-management/gateway-events#event-metadata). + +The webhook event handler runs in its own process and so does not block the operation of the Gateway. 
+ +##### Webhook cooldown + +It is very likely that an `AuthFailure` event will fire on the same endpoint more than once if the requesting client is automated. If this event triggered a webhook that caused an email to be sent, then if this event occurred 10 times a second, the email recipient would be flooded with emails. In an attempt to mitigate against events such as this, you can set a cooldown timer, in the webhook handler. This prevents the webhook from being triggered again if the event is fired again within the time period specified. + +##### Webhook payload + +When your webhook event handler is triggered, it will send an HTTP request to the configured target. For HTTP methods that support a request body, for example `POST`, the event handler will process a [Go template](/api-management/traffic-transformation/go-templates) to produce the payload. + +If no template is provided in the webhook event handler configuration in the API definition, Tyk Gateway will look for the default file `templates/default_webhook.json`. Any text file accessible to the Gateway can be used to store the Go template to be used by the event handler when constructing the payload. + +The event handler has access to the [event metadata](/api-management/gateway-events#event-metadata) and this can be accessed by the template using the `{{.Meta.XXX}}` namespace. + +The [event type](/api-management/gateway-events#event-types) that triggered the event handler can be accessed as `{{.Type}}`. 
+ +For most event types, the default webhook template has this form: + +```json +{ + "event": "{{.Type}}", + "message": "{{.Meta.Message}}", + "path": "{{.Meta.Path}}", + "origin": "{{.Meta.Origin}}", + "key": "{{.Meta.Key}}" +} +``` + +This would generate a request body (payload) such as: +```json +{ + "event": "RatelimitExceeded", + "message": "API Rate Limit Exceeded", + "path": "/example-global-webhook/", + "origin": "99.242.139.220", + "key": "apilimiter-66336c67cb7191f791f167134b20d1f4c14b4bb5672b57f4b2813c86" +} +``` + +#### Using webhooks with Tyk Dashboard + +Webhook event handlers are configured within the API definition, which is used by Tyk Gateway to determine the appropriate action to be performed in response to a Gateway event. + +When using Tyk Dashboard, you are able to create *global webhooks* that can be re-used across multiple events and APIs, allowing you to modify the webhook configuration for a batch of APIs and/or events from one location. + +##### Local and global webhooks + +Tyk Dashboard supports the declaration of webhooks *globally* and *locally*: +- **Global webhooks** are declared outside the API definition and linked via a *webhook id*; changes to the global webhook definition will be reflected in all APIs that reference that *webhook id* +- **Local webhooks** are fully defined within the API definition; changes to the local webhook configuration will affect only the API within which it is defined + +*Global webhook definitions* are registered with the Dashboard using the [UI](#creating-a-global-webhook-definition-using-tyk-dashboard) or [Dashboard API](/api-management/dashboard-configuration#web-hooks-api) and assigned a unique *webhook id* that can be obtained via the [Dashboard API](/api-management/dashboard-configuration#list-web-hooks) or via drop-down selection within the UI. 
+ +If you assign a global webhook definition to an API to handle an event, then Tyk Dashboard will retrieve the definition and update it in the API definition when the API is loaded (or re-loaded) to the Gateway. + +##### Creating a global webhook definition using Tyk Dashboard + +To create a global webhook definition from the Dashboard UI you should follow these steps: + +**Steps for Configuration** + +1. **Create the webhook definition** + + Select **Webhooks** from the **API Management** Menu: + + Webhooks menu item + + Click **Add Webhook**. + + Add webhook button + +2. **Configure the webhook** + + Now you need to tell Tyk how and where to send the request. You can include custom headers, for example to inform the target service that the request has come from Tyk - remember to click **ADD** to add the custom header to the configuration. + + Add webhook detail + + Click **Save** to save it. + +
+ +If you're using Tyk OAS APIs, then you can find details and examples of how to configure webhook event handlers [here](/api-management/gateway-events#webhook-event-handlers-with-tyk-oas-apis). + +If you're using Tyk Classic APIs, then you can find details and examples of how to configure webhook event handlers [here](/api-management/gateway-events#webhook-event-handlers-with-tyk-classic-apis). + +### Webhook event handlers with Tyk OAS APIs + +[Webhooks](/api-management/gateway-events#event-handling-with-webhooks) are event handlers that can be registered against API Events. The webhook will be triggered when the corresponding event is fired and will send a customizable fixed payload to any open endpoint. + +Webhooks are configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/gateway-events#webhook-event-handlers-with-tyk-classic-apis) page. + +#### Set up a webhook event handler in the Tyk OAS API Definition + +Event handling is configured by adding the `eventHandlers` object to the `server` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition. + +The `eventHandlers` object is an array containing configurations for all event handlers registered with the API. + +##### Local webhook configuration + +When using a local webhook, the event handler element in the `eventHandlers` object has the following configuration which fully declares the webhook behaviour: +- `enabled`: enable the event handler +- `trigger`: the API event that will trigger the webhook +- `type`: the type of event handler, in this case should be set to `webhook` +- `cooldownPeriod`: the [webhook cooldown](/api-management/gateway-events#webhook-cooldown) for duplicate events (in duration format, e.g. 
10s, 1m30s); use this to prevent flooding of the target endpoint when multiple events are fired in quick succession +- `name`: a human readable name for the webhook, which will be displayed in Tyk Dashboard +- `url`: this is an **absolute URL** to which the request will be sent +- `method`: this can be any of `GET`, `PUT`, `POST`, `PATCH` or `DELETE` and will be the HTTP method used to send the request; methods that do not support an encoded request body will not have the event metadata provided with the request; we advise using `POST` where possible +- `bodyTemplate`: this is the path to the [webhook template](/api-management/gateway-events#webhook-payload) that will be used to construct the request body +- `headers`: a map of custom headers to be provided with the request + +For example: +```json {hl_lines=["18-33"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-local-webhook", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "components": {}, + "x-tyk-api-gateway": { + "info": { + "name": "example-local-webhook", + "state": { + "active": true + } + }, + "server": { + "eventHandlers": [ + { + "enabled": true, + "trigger": "RatelimitExceeded", + "cooldownPeriod": "1s", + "type": "webhook", + "name": "My local webhook", + "url": "https://webhook.site/", + "method": "POST", + "headers": [ + { + "name": "X-Tyk", + "value": "example-local-webhook" + } + ], + "bodyTemplate": "templates/default_webhook.json" + } + ], + "listenPath": { + "strip": true, + "value": "/example-local-webhook/" + } + }, + "upstream": { + "rateLimit": { + "enabled": true, + "per": "10s", + "rate": 2 + }, + "url": "http://httpbin.org/" + } + } +} +``` + +In this example a local webhook has been registered to trigger when the `RatelimitExceeded` event is fired. The request rate limit has been set at 2 requests per 10 seconds, so simply make three requests in quick succession to trigger the webhook. 
+ +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the local webhook feature. + +Note that to test this you will need to provide a valid target URL for your webhook to send the request; we've used `https://webhook.site/`. + + +##### Global webhook configuration + +When using a *global webhook*, the event handler element in the `eventHandlers` object has the following configuration, which references the externally declared webhook using its `id`: +- `enabled`: enable the event handler +- `trigger`: the API event that will trigger the webhook +- `type`: the type of event handler, in this case should be set to `webhook` +- `cooldownPeriod`: the [webhook cooldown](/api-management/gateway-events#webhook-cooldown) for duplicate events (in duration format, e.g. 10s, 1m30s); use this to prevent flooding of the target endpoint when multiple events are fired in quick succession +- `id`: the *webhook id* assigned by Tyk to the global webhook when it was created (this can be determined using the [list webhooks](/api-management/dashboard-configuration#list-web-hooks) endpoint in the Tyk Dashboard API) + +For example: + +```json {hl_lines=["18-24"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-global-webhook", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "components": {}, + "x-tyk-api-gateway": { + "info": { + "name": "example-global-webhook", + "state": { + "active": true + } + }, + "server": { + "eventHandlers": [ + { + "enabled": true, + "trigger": "RatelimitExceeded", + "cooldownPeriod": "1s", + "type": "webhook", + "id": "" + } + ], + "listenPath": { + "strip": true, + "value": "/example-global-webhook/" + } + }, + "upstream": { + "rateLimit": { + "enabled": true, + "per": "10s", + "rate": 2 + }, + "url": "http://httpbin.org/" + } + } +} +``` + +In this example a global webhook has been registered to trigger when the `RatelimitExceeded` event is fired. The request rate limit has been set at 2 requests per 10 seconds, so simply make three requests in quick succession to trigger the webhook. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the global webhook feature. + +Note, however, that to test this you will need to create a *global webhook* in your Tyk Dashboard and replace the value in `id` with the *webhook id* that Tyk Dashboard has allocated to your webhook. You can find this by querying the [list webhooks](/api-management/dashboard-configuration#list-web-hooks) endpoint in the Tyk Dashboard API. +
+
+ + +When a *global webhook* is registered to a Tyk OAS API, Tyk will create a read-only copy of the webhook [configuration](#local-webhook-configuration) (`url`, `method`, `bodyTemplate`, `headers`) within the API definition. This is so that Tyk Gateway knows how to handle the event, as it does not have access to the store of *global webhooks* registered with Tyk Dashboard. +
+
+If the global webhook is subsequently deleted from the Tyk Dashboard, the webhook will automatically be converted to a local webhook in any API definition that was using it. +
+ + + +#### Set up a webhook event handler in the Tyk Dashboard + +It is very simple to register webhooks to be triggered in response to specific API events when using Tyk OAS APIs with the Tyk Dashboard. The API Designer in the Dashboard allows you to define *local webhooks* and to register *global webhooks* to handle events. + +If you want to use a *global webhook* then you'll need to declare it first, following [these instructions](/api-management/gateway-events#creating-a-global-webhook-definition-using-tyk-dashboard). + +1. **Add event handler** + + From the **Settings** tab in the API Designer, scroll down to the **Server** section to find the **Event Handlers** pane. Select **Add Event**. + + Add an event handler from the Server section + +2. **Choose the event to be handled** + + This will add an event handler to the API. You'll need to select which event you want to handle from the drop-down list. Note that currently Tyk OAS only supports webhook event handlers, so this will default to *webhook* type. + + Choose the event that will trigger the webhook + +3. **Choose and configure global webhook** + + If you want to use a webhook that you've already registered with Tyk Dashboard, ensure that the **Webhook source** is set to **Global webhook** then select from the drop-down list. + + The only other thing you'll need to configure is the cooldown period. + + Select from the list of available global webhooks + + Note that Tyk automatically retrieves the details of the *global webhook* and displays them (read-only) in the API designer. + + A fully configured global webhook + + Don't forget to select **Save API** to apply the changes. + +4. **Configure local webhook** + + If you don't want to use a shared *global webhook* but instead want to configure a *local webhook* only available to this API/event then you should ensure that the **Webhook source** is set to **Local webhook**. 
+ + Ready to configure a local webhook + + Now you can complete the various fields to set up your *local webhook*. If you want to add custom headers to send with the HTTP request, select **New Header** then enter the header key and value. + + A fully configured global webhook + + Don't forget to select **Save API** to apply the changes. + +### Webhook event handlers with Tyk Classic APIs + +[Webhooks](/api-management/gateway-events#event-handling-with-webhooks) are event handlers that can +be registered against API Events. The webhook will be triggered when the corresponding event is fired and will send a +customisable fixed payload to any open endpoint. + +Webhooks are configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API +Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk +OAS](/api-management/gateway-events#webhook-event-handlers-with-tyk-oas-apis) +page. + +#### Set up a webhook event handler in the Tyk Classic API Definition + +To add a webhook event handler you must add a new event handler object within the `event_handlers.events` section of the +API definition for the appropriate [API event](/api-management/gateway-events#event-types). 
+ +The event handler object has the following configuration: + +- `handler_name`: this identifies the type of event handler and must be set to `eh_web_hook_handler` +- `handler_meta`: this structure configures the HTTP request that will be sent when the webhook is triggered + +The `handler_meta` object has the following configuration: + +- `method`: this can be any of `GET`, `PUT`, `POST`, `PATCH` or `DELETE` and will be the HTTP method used to send the + request; methods that do not support an encoded request body will not have the event metadata provided with the + request; we advise using `POST` where possible +- `target_path`: this is an **absolute URL** to which the request will be sent +- `template_path`: this is the path to the [webhook + template](/api-management/gateway-events#webhook-payload) that will be + used to construct the request body +- `header_map`: a map of custom headers to be provided with the request +- `event_timeout`: the [webhook + cooldown](/api-management/gateway-events#webhook-cooldown) for duplicate + events (in seconds); use this to prevent flooding of the target endpoint when multiple events are fired in quick succession + +For example: + +```json {linenos=true, linenostart=1} +{ + "event_handlers": { + "events": { + "AuthFailure": [ + { + "handler_name": "eh_web_hook_handler", + "handler_meta": { + "method": "POST", + "target_path": "http://posttestserver.com/post.php?dir=tyk-event-test", + "template_path": "templates/default_webhook.json", + "header_map": { "X-Tyk-Test-Header": "Tyk v1.BANANA" }, + "event_timeout": 10 + } + } + ] + } + } +} +``` + +In this example, when the `AuthFailure` event is fired, the webhook event handler will send a request to +`POST http://posttestserver.com/post.php?dir=tyk-event-test` and then start a 10 second cooldown before another webhook +request can be sent. 
+ +The request will have one custom header `X-Tyk-Test-Header: Tyk v1.BANANA` and the body will be constructed from the +webhook template located at `templates/default_webhook.json`. + + + +This manually configured webhook event handler is private to the API within which it has been defined, it is not a +[global +webhook](/api-management/gateway-events#using-webhooks-with-tyk-dashboard). + + + +#### Set up a webhook event handler in the Tyk Dashboard + +It is very simple to register webhooks to be triggered in response to specific API events when using Tyk Classic APIs +with the Tyk Dashboard. The API Designer in the Dashboard allows you to register _global webhooks_ to handle events. + +Note that Tyk Gateway does not have access to the _global webhook_ definitions registered with Tyk Dashboard and can +only operate on the configuration within the API definition. Dashboard will manage the conversion of _global webhooks_ +to [locally defined webhook handlers](#set-up-a-webhook-event-handler-in-the-tyk-classic-api-definition) within the Tyk +Classic API definition, automatically updating the configuration in each API definition when the APIs are reloaded to +the Gateway. + +1. **Define the webhook** + + Before you can configure a webhook event handler for your API, you must first create a global webhook from the + **Webhooks** screen in the **API Management** menu, as described + [here](/api-management/gateway-events#creating-a-global-webhook-definition-using-tyk-dashboard). + +2. 
**Register the webhook with the event** + + From the API Designer select the **Advanced Options** tab and locate the **Webhooks** panel: + + Webhook API Details + + Now: + + - select the _API Event_ for which you want to trigger the webhook from the dropdown list + - select the _Webhook to use_ when the event fires, again from the dropdown list + - finally, configure the required _Cooldown period_ + - click **Add** + + Note that you can register multiple webhooks to be triggered in response to a single event and you can register the same + webhook with multiple API events. + + Remember to click **Save** to save your changes. + +#### Set up a webhook event handler in Tyk Operator + +Tyk Operator supports event handler integration for Tyk Classic API Definition. Configuring the `event_handlers` field +in ApiDefinition Custom Resource Definition (CRD) enables webhooks to be triggered by [specific +API events](/api-management/gateway-events#event-types). + +The process for configuring webhook event handlers using Tyk Operator is similar to that explained in +[Set up a webhook event handler in the Tyk Classic API Definition](#set-up-a-webhook-event-handler-in-the-tyk-classic-api-definition). +The example API Definition below enables the event handler by setting `spec.event_handlers`. 
+ +```yaml {hl_lines=["14-25"],linenos=true, linenostart=1} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: webhook-handler +spec: + name: webhook-handler + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /webhook-handler + strip_listen_path: true + event_handlers: + events: + AuthFailure: + - handler_name: "eh_web_hook_handler" + handler_meta: + method: "POST" + name: "webhook name" + target_path: "http://posttestserver.com/post.php?dir=tyk-event-test" + template_path: "templates/default_webhook.json" + header_map: + X-Tyk-Test-Header: "Tyk v1.BANANA" + event_timeout: 10 +``` + + +## Logging API events + +Tyk’s built-in logging event handler is designed primarily for debugging purposes and will store details of an API event to the configured logger output. + +The Tyk platform can be configured to log at various verbosity levels (info, debug, warn, error) and can be integrated with third-party log aggregation tools like Sentry, Logstash, Graylog, and Syslog. For full details on configuring the Tyk logger, see [this section](/api-management/logs-metrics#system-logs). + +
+ + +Logging event handlers are currently only supported by Tyk Classic APIs. + + + +### Configuring the event handler + +Registering a logging event handler to your Tyk Classic API is the same as adding any other event handler, within the `event_handlers` section of the API definition. + +The `handler_name` for the logging event handler should be set to: `eh_log_handler`. + +The `handler_meta` for the logging event handler contains a single field: +- `prefix` is a label that will be prepended to each log entry + +For example, to register event handlers to log the `AuthFailure` and `KeyExpired` events you might add the following to your API definition: + +```json +{ + "event_handlers": { + "events": { + "AuthFailure": [ + { + "handler_name": "eh_log_handler", + "handler_meta": { + "prefix": "AuthFailureEvent" + } + } + ], + "KeyExpired": [ + { + "handler_name": "eh_log_handler", + "handler_meta": { + "prefix": "KeyExpiredEvent" + } + } + ] + } + } +} +``` + +In this example +- the `AuthFailure` event will trigger the event handler to generate a log with the prefix `AuthFailureEvent` +- the `KeyExpired` event will trigger the event handler to generate a log with the prefix `KeyExpiredEvent` + +When the event handler is triggered an entry will be made in the log containing the corresponding prefix, which can be useful for monitoring and debugging purposes. + +## Custom API event handlers + +Tyk allows you to script your own custom code in JavaScript (JS) that will be invoked in response to API events. This is executed asynchronously so you don't need to worry about it blocking the Gateway handling requests. Event handlers like this can be very powerful for automating session, user and API-level functions. + +It is important to note that unlike custom JavaScript [plugins](/api-management/plugins/javascript#), custom event handlers execute in a *global* JavaScript environment. 
This means that you need to be careful when naming the event handlers: if you use the same event handler name for different event handling code across two APIs, only one of them will execute, as the other will be overridden when loaded. + +Custom event handlers have access to the [JavaScript API](/api-management/plugins/javascript#javascript-api) which gives access to the session object and enables your code to make HTTP calls. This is particularly useful if you want to interface with another API with a complex request/response cycle. + +
+ + +Custom event handlers are currently only supported by Tyk Classic APIs. + + + +### Creating a custom event handler + +A custom event handler consists of a function that accepts two variables (`event` and `context`) and has no return value. + +Creating an event handler is very similar to [creating custom JS plugins](/api-management/plugins/javascript#using-javascript-with-tyk), simply invoke the correct constructors with a closure in the TykJS namespace: + +```js +// ---- Sample custom event handler ----- +var sampleHandler = new TykJS.TykEventHandlers.NewEventHandler({}); + +sampleHandler.NewHandler(function(event, context) { + // You can log to Tyk console output by calling the built-in log() function: + log("This handler does nothing, but this will appear in your terminal") + + return +}); +``` + +#### The `event` object + +This contains the [event metadata](/api-management/gateway-events#event-metadata) in the following structure: + +```json +{ + "EventType": "Event Type Code", + "EventMetaData": { + "Message": "My Event Description", + "Path": "/{{api_id}}/{{path}}", + "Origin": "1.1.1.1:PORT", + "Key": "{{Auth Key}}" + }, + "TimeStamp": "2024-01-01 23:59:59.111157073 +0000 UTC" +} +``` + +#### The `context` Variable + +Tyk injects a `context` object into your event handler giving access to more information about the request. This object has the following structure: + +```js +type JSVMContextGlobal struct { + APIID string + OrgID string +} +``` + +It is populated with the API ID and Org ID of the request that your custom function can use together with the `event` metadata to interact with the Tyk REST API functions, for example: + +```js +// Use the TykGetKeyData function to retrieve a session from the session store, use the context variable to give the APIID for the key. 
+var thisSession = JSON.parse(TykGetKeyData(event.EventMetaData.Key, context.APIID)) +log("Expires: " + thisSession.expires) +``` + +### Registering a custom event handler + +Registering a custom event handler to your Tyk Classic API is the same as adding any other event handler, within the `event_handlers` section of the API definition. + +The `handler_name` for a custom event handler should be set to: `eh_dynamic_handler`. + +The `handler_meta` for a custom event handler consists of two fields: +- `name` is the unique name of your middleware object +- `path` is the relative path to the file (it can be absolute) + +For example, to register a custom event handler with the name `sessionHandler` to be invoked in response to the `KeyExpired` event you would add the following to your API definition: + +```json +{ + "event_handlers": { + "events": { + "KeyExpired": [ + { + "handler_name":"eh_dynamic_handler", + "handler_meta": { + "name": "sessionHandler", + "path": "event_handlers/session_editor.js" + } + } + ] + } + } +} +``` + +### Loading custom event handlers + +The JavaScript files are loaded on API reload into the global JSVM. If a hot-reload event occurs, the global JSVM is re-set and files are re-loaded. This could cause event handlers that are currently executing to get abandoned. This is a measured risk and should not cause instability, however it should be noted that because of this, in an environment where reloads occur frequently, there is risk that event handler may not fire correctly. + +## Monitoring quota consumption + +Tyk provides the ability to actively monitor both user and organization quotas, using a dedicated webhook to notify your stakeholders, your system stack or the requesting API client when certain thresholds have been reached for a token. + +Unlike API event [webhooks](/api-management/gateway-events#event-handling-with-webhooks) the quota monitor is configured at the Gateway level. + +
+ + +Advanced quota threshold monitoring is currently only supported by Tyk Classic APIs. + + + +### Configuring the quota consumption monitor + +To enable advanced quota monitoring you will need to add a new `monitor` section to your Tyk Gateway configuration file (`tyk.conf`). + +This has the following fields: +- `enable_trigger_monitors`: set to `true` to have the monitors start to measure quota thresholds +- `configuration`: a [webhook configuration](/api-management/gateway-events#event-handling-with-webhooks) object +- `global_trigger_limit`: this is a percentage of the quota that the key must consume for the webhook to be fired +- `monitor_user_keys`: set to `true` to monitor individual tokens (this may result in a large number of triggers as it scales with the number of user tokens that are issued) +- `monitor_org_keys`: set to `true` to monitor organization quotas + +For example: + +```json +{ + "monitor": { + "enable_trigger_monitors": true, + "configuration": { + "method": "POST", + "target_path": "http://posttestserver.com/post.php?dir=tyk-monitor-drop", + "template_path": "templates/monitor_template.json", + "header_map": {"x-tyk-monitor-secret": "12345"}, + "event_timeout": 10 + }, + "global_trigger_limit": 80.0, + "monitor_user_keys": false, + "monitor_org_keys": true + } +} +``` + +With this configuration, a monitor is configured to issue a request to `POST http://posttestserver.com/post.php?dir=tyk-monitor-drop` when 80% of the API-level quota has been consumed. This request will have the `x-tyk-monitor-secret` header (set to a value of `12345`) and will provide the content of the template file found at `templates/monitor_template.json` in the request body. A minimum of 10 seconds will elapse between successive monitor webhooks being fired. + +
+ + +If you are using our [Classic Developer Portal](/tyk-developer-portal/tyk-portal-classic/portal-events-notifications), developers registered in the portal will also receive emails about quota threshold limits being reached. + + + +#### Setting advanced thresholds + +The default quota consumption monitor will be triggered at the same level of quota usage for all users. Sometimes you might want to have a more granular approach with different triggering thresholds per user or organization. Sometimes you might want to fire the event at multiple thresholds, for example when the user hits 50%, 75% and 90% of their allowed quota. + +You can set user specific trigger levels for a user by additionally adding a `monitor` section to the access key ([Session Object](/api-management/policies#what-is-a-session-object)). This has one field, which is an array of `trigger_limits` (thresholds) that must be in *descending* order and represent the percentage of the quota that must be reached in order for the trigger to be fired, for example: + +```yaml +"monitor": { + "trigger_limits": [90.0, 75.0, 50.0] +} +``` + +If this is included in the session object, then the quota threshold event will be fired and the monitor webhook triggered when the user hits 50%, then 75%, and then again at 90% consumption. + +You can configure advanced thresholds for all users in an organization by adding the `monitor` section to the organization session object. + +### Webhook payload + +When the quota consumption monitor is fired, the webhook request that is issued will have the following payload: + +```json +{ + "event": "TriggerExceeded", + "message": "Quota trigger reached", + "org": "53ac07777cbb8c2d53000002", + "key": "", + "trigger_limit": "80" +} +``` + +- `trigger_limit` will indicate which threshold has been reached (as defined in the session object's `monitor` section). 
+- `org` will contain the OrgID for the user or organization that triggered the event +- `key` will contain the *raw API key* used in the request only if the event was triggered by a user quota + +*Note: if the webhook was triggered by an organization threshold, `key` will be blank.* + +
+ + +When the monitor is triggered by a user hitting their quota threshold, the raw API key is provided in the webhook payload. It is important to secure the webhook endpoint and to handle the payload securely on the receiving end. + + + +## Error Templates + +In v2.2 the error handler allowed the use of a single JSON template to communicate errors to users (a default one is shipped with Tyk, it's located in `templates/error.json`). + +As of v2.3 it is possible to use different templates for specific `HTTP error codes`. The `content-type` header of the request is also checked, enabling the usage of different template formats, e.g. an XML template. + +Please note that it is not possible to override the default message for HTTP 404 errors. These errors indicate that the requested resource could not be found (e.g. the requested URL does not exist). + +### Use Cases + +#### JSON Request + +When an HTTP 500 error occurs, and the request is a JSON request, Tyk will follow this logic: + +* If `templates/error_500.json` exists, this template will be used. +* Otherwise, Tyk will use `templates/error.json`. + +#### XML Request + +When an HTTP 500 error occurs, and the request is an XML request, Tyk will follow this logic: + +* If `templates/error_500.xml` exists, this template will be used. +* If no specific template exists for this HTTP code, `templates/error.xml` will be used. +* If `error.xml` doesn't exist, `templates/error.json` will be used. + +#### Removing the X-Generator Header + +In case of an error, the Tyk Gateway adds the following fixed header and value: `X-Generator: tyk.io` +Please note that for `404 Not found` errors, Tyk will not return this header from a security perspective. 
To mitigate this issue, in case you want to better understand your clients and provide you, the manager of the platform, with error information, you can set `track_404_logs` to `true` in your `tyk.conf` which will then produce error logs showing the resources that were requested and not found. + +If you don't want to return our default X-Generator header (set to tyk.io) in your templates, set `hide_generator_header` to `true` in your `tyk.conf` file diff --git a/api-management/graphql.mdx b/api-management/graphql.mdx new file mode 100644 index 000000000..4bb852818 --- /dev/null +++ b/api-management/graphql.mdx @@ -0,0 +1,2488 @@ +--- +title: "GraphQL" +description: "How to configure GraphQL" +keywords: "GraphQL, Federation, Entities, Grapqhl Proxy, Validation, Schema, Complexity Limiting, Persisted Queries, Migration Guide, GraphQL Playground, GQL Headers" +sidebarTitle: "Overview" +--- + +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; + +## Overview + +Tyk has **native** GraphQL support, so it doesn’t require any external services or middleware. +It fully complies with the latest GraphQL specifications, as outlined on the [GraphQL Foundation webpage](https://spec.graphql.org/), including: + +- **[Queries](https://spec.graphql.org/October2021/#sec-Query)** – Fetching data +- **[Mutations](https://spec.graphql.org/October2021/#sec-Mutations)** – Modifying data +- **[Subscriptions](https://spec.graphql.org/October2021/#sec-Subscription)** – Real-time updates +- **[Schema Types](/api-management/graphql/graphql-schema-types)** - Defining your data structure + + +### What can you do with GraphQL and Tyk? + +You can securely expose existing GraphQL APIs using our [GraphQL core functionality](/api-management/graphql#create-a-graphql-api). + +In addition to this, you can also use Tyk's integrated GraphQL engine to build a [Universal Data Graph](/api-management/data-graph#overview). 
The Universal Data Graph (UDG) lets you expose existing services as one single combined GraphQL API. + +See our video on getting started with GraphQL. + + + +### What is GraphQL? + +> GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data. GraphQL provides a complete and understandable description of the data in your API, gives clients the power to ask for exactly what they need and nothing more, makes it easier to evolve APIs over time, and enables powerful developer tools. + +source: [GraphQL Foundation website](https://graphql.org/) + +### Why would you want to use GraphQL? + +Since this is the documentation section, we won't get into a debate about GraphQL vs REST. The main benefits of using GraphQL are: +* **Reduced network traffic** One of the biggest benefits of GraphQL is that it allows clients to specify exactly what data they need. This means that you can avoid sending unnecessary data over the network, which can help reduce the amount of traffic and improve the performance of your application. +* **Flexibility** GraphQL is very flexible and can be used with many different programming languages and frameworks. It can also be used to retrieve data from multiple sources, such as databases, APIs, and even third-party services. +* **Simplified data fetching** With GraphQL, you can fetch all the data you need with a single request. This is because GraphQL allows you to specify exactly what data you need and how it should be structured, which can simplify the process of fetching data and reduce the amount of code you need to write. +* **Easy maintenance** Because GraphQL allows you to define a schema for your data, it can be easier to maintain and evolve your API over time. This is because changes to the schema can be made without breaking existing clients, as long as the changes are backward compatible. 
+ +* **Strong typing** GraphQL has a strong type system that allows you to define the shape of your data and ensure that the data you receive is of the correct type. This can help catch errors early on and make your code more reliable. +* **Better developer experience for certain use cases** Examples of those use cases mostly mentioned by developers are: APIs with multiple consumers that have very different requirements, public APIs with large groups of unknown users (like Shopify or GitHub), rapidly evolving APIs, backends for mobile applications, aggregating data from multiple microservices and development of data-driven products. + +Our team has also published some blog posts that go deeper into GraphQL discussions. You can check some of them here: +* [How Airbnb, Shopify, GitHub and more are winning with GraphQL](https://tyk.io/blog/how-airbnb-shopify-github-and-more-are-winning-with-graphql-and-why-you-may-need-it-too/) +* [Who is Tyk GraphQL functionality for](https://tyk.io/blog/using-tyks-new-graphql-functionality-whos-it-for-and-what-does-it-do/) +* [GraphQL: Performance is no longer a trade-off](https://tyk.io/blog/graphql-performance-is-no-longer-a-trade-off/) + +## Create a GraphQL API + +A GraphQL API can be created in Tyk using: +* Tyk Dashboard UI +* Tyk Dashboard API +* Tyk Gateway API - for OSS users + +The process is very similar to [HTTP API creation](/api-management/gateway-config-managing-classic#create-an-api) with a few additional steps to cover GraphQL-specific functionalities. + +### Via Tyk Dashboard UI + +#### Prerequisites + +In order to complete the next steps, you need to have [Tyk Self Managed installed](/tyk-self-managed/install). You can also create a 5-week trial account in Tyk Cloud. + + + +#### Steps for Configuration + +1. **Select "APIs" from the "System Management" section** + + API Menu + +2. **Click "ADD NEW API"** + + Add API button location + +3.
**Set up the Base Configuration for your API** + + Create GQL API + + - From the **Overview** section, add your **API Name** and your API **Type** (In this case it's GraphQL). + - From the **Details** section, add your **Target URL**. This will set the upstream origin that hosts the service you want to proxy to. As an example, you can use [https://countries.trevorblades.com/](https://countries.trevorblades.com/). + - In case your upstream GQL service is protected, tick the box next to **Upstream Protected** and provide authorization details, so that Tyk can introspect the GraphQL service. You can provide authorization details as a set of headers or a certificate. [Introspection](/api-management/graphql#introspection) of your upstream service is important for Tyk to correctly work with your GraphQL. + - If you would like to persist authorization information for future use you can tick the **Persist headers for future use** box. That way, if the upstream GQL schema changes in the future, you will be able to update it easily in Tyk. + - Click **Configure API** when you have finished + +4. **Set up the Authentication for your API** + + From the **Authentication** section: + + Authentication + + You have the following options: + + - **Authentication mode**: This is the security method to use with your API. First, you can set it to `Open(Keyless)`, but that option is not advised for production APIs. See [Client Authentication](/api-management/client-authentication) for more details on securing your API. + - **Strip Authorization Data**: Select this option to strip any authorization data from your API requests. + - **Auth Key Header Name**: The header name that will hold the token on inbound requests. The default for this is `Authorization`. + - **Allow Query Parameter As Well As Header**: Set this option to enable checking the query parameter as well as the header for an auth token. 
**This is a setting that might be important if your GQL includes subscription operations**. + - **Use Cookie Value**: It is possible to use a cookie value as well as the other two token locations. + - **Enable client certificate**: Select this to use Mutual TLS. See [Mutual TLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls) for details on implementing mutual TLS. + +5. **Save the API** + + Click **SAVE** + + Save button + + Once saved, you will be taken back to the API list, where the new API will be displayed. + + To see the URL given to your API, select the API from the list to open it again. The API URL will be displayed at the top of the editor: + + API URL location + + Your GQL API is now secured and ready to use. + +### Via Tyk Dashboard API + +#### Prerequisites + +It is possible to create GQL APIs using [Tyk Dashboard APIs](/api-management/dashboard-configuration#manage-apis-api-definition). To make things easier you can use our [Postman collection](https://www.postman.com/tyk-technologies/workspace/tyk-public-workspace/overview). + +You will need an API key for your organization and one command to create a GQL API and make it live. + +#### Steps for Configuration + +1. **Obtain your Tyk Dashboard API Access Credentials key & Dashboard URL** + + From the Tyk Dashboard, select "Users" from the "System Management" section. + Click **Edit** for your user, then scroll to the bottom of the page. 
Your **Tyk Dashboard API Access Credentials** key is the first entry: + + API key location + + Store your Dashboard Key, Dashboard URL & Gateway URL as environment variables so you don't need to keep typing them in: + + ```bash + export DASH_KEY=db8adec7615d40db6419a2e4688678e0 + + # Locally installed dashboard + export DASH_URL=http://localhost:3000/api + + # Tyk's Cloud Dashboard + export DASH_URL=https://admin.cloud.tyk.io/api + + # Locally installed gateway + export GATEWAY_URL=http://localhost:8080 + + # Your Cloud Gateway + export GATEWAY_URL=https://YOUR_SUBDOMAIN.cloud.tyk.io + ``` + +2. **Query the `/api/apis` endpoint to see what APIs are loaded** + + ```curl + curl -H "Authorization: ${DASH_KEY}" ${DASH_URL}/apis + {"apis":[],"pages":1} + ``` + + For a fresh install, you will see that no APIs currently exist. + +3. **Create your first GQL API** + + This example API definition configures the Tyk Gateway to reverse proxy to the [https://countries.trevorblades.com/](https://countries.trevorblades.com/) public GraphQL service. + + To view the raw API definition object, you may visit: https://bit.ly/3zmviZ3 + + ```curl + curl -H "Authorization: ${DASH_KEY}" -H "Content-Type: application/json" ${DASH_URL}/apis \ + -d "$(wget -qO- https://bit.ly/3zmviZ3)" + {"Status":"OK","Message":"API created","Meta":"64270eccb1821e3a5c203d98"} + ``` + + Take note of the API ID returned in the meta above - you will need it later. + + ``` + export API_ID=64270eccb1821e3a5c203d98 + ``` + +4. **Test your new GQL API** + + ```curl + curl --location ${GATEWAY_URL}/trevorblades/ + --header 'Content-Type: application/json' + --data '{"query":"query {\n countries {\n name\n capital\n awsRegion\n }\n}","variables":{}}' + ``` + + You just sent a request to the gateway on the listen path `/trevorblades`. Using this path-based-routing, the gateway was able to identify the API the client intended to target. 
+ + The gateway stripped the listen path and reverse-proxied the request to https://countries.trevorblades.com/ + +5. **Protect your API** + + Let's grab the API definition we created before and store the output in a file locally. + + ```curl + curl -s -H "Authorization: ${DASH_KEY}" -H "Content-Type: application/json" ${DASH_URL}/apis/${API_ID} | python -mjson.tool > api.trevorblades.json + ``` + + We can now edit the `api.trevorblades.json` file we just created, and modify a couple of fields to enable authentication. + + Change `use_keyless` from `true` to `false`. + + Change `auth_configs.authToken.auth_header_name` to `apikey`. + + Then send a `PUT` request back to Tyk Dashboard to update its configurations. + + ```curl + curl -H "Authorization: ${DASH_KEY}" -H "Content-Type: application/json" ${DASH_URL}/apis/${API_ID} -X PUT -d "@api.trevorblades.json" + {"Status":"OK","Message":"Api updated","Meta":null} + ``` + +6. **Test protected API** + + Send request without any credentials + + ```curl + curl -I ${GATEWAY_URL}/trevorblades/ \ + --header 'Content-Type: application/json' \ + --data '{"query":"query {\n countries {\n name\n capital\n awsRegion\n }\n}","variables":{}}' + + HTTP/1.1 401 Unauthorized + Content-Type: application/json + X-Generator: tyk.io + Date: Wed, 04 Dec 2019 23:35:34 GMT + Content-Length: 46 + ``` + + Send a request with incorrect credentials + + ```curl + curl -I ${GATEWAY_URL}/trevorblades/ \ + --header 'Content-Type: application/json' \ + --data '{"query":"query {\n countries {\n name\n capital\n awsRegion\n }\n}","variables":{}}' \ + -H 'apikey: somekey' + + HTTP/1.1 403 Forbidden + Content-Type: application/json + X-Generator: tyk.io + Date: Wed, 04 Dec 2019 23:36:16 GMT + Content-Length: 57 + ``` + + Congratulations - You have just created your first keyless GQL API, and then protected it using Tyk! 
+ +### Via Tyk Gateway API + +#### Prerequisites + +In order to complete the next steps, you need to have the [Tyk OSS](/tyk-oss-gateway) installed. + + + +#### Creation Methods + +With Tyk OSS, it is possible to create GQL APIs using Tyk's Gateway API or to generate a file with the same object and store it in the `/apps` folder of the Tyk Gateway installation folder. This is demonstrated [in the file-based configuration section](/api-management/manage-apis/deploy-apis/deploy-apis-overview#file-based-configuration). + + +#### Steps for Configuration + + + +A generated API ID will be added to the Tyk API definition if it's not provided while creating a GQL API with Tyk Gateway API. + + + +See our video for adding an API to the Open Source Gateway via the Gateway API and Postman: + + + +You can also use our [Postman collection](https://www.postman.com/tyk-technologies/workspace/tyk-public-workspace/overview) to make things easier. + +In order to use the Gateway API you will need an API key for your Gateway and one command to create the API and make it live. + +1. **Make sure you know your API secret** + + Your Tyk Gateway API secret is stored in your `tyk.conf` file, the property is called `secret`, you will need to use this as a header called `x-tyk-authorization` to make calls to the Gateway API. + +2. **Create a GQL API** + + To create a GQL API, let's send a definition to the `apis` endpoint, which will return the status and version of your Gateway. Change the `x-tyk-authorization` value and `curl` domain name and port to be the correct values for your environment. + + This example API definition configures the Tyk Gateway to reverse proxy to the [https://countries.trevorblades.com/](https://countries.trevorblades.com/) public GraphQL service. 
+ + To view the raw API definition object, you may visit: https://bit.ly/3nt8KDa + + ```curl + curl --location --request POST 'http://{your-tyk-host}:{port}/tyk/apis' \ + --header 'Content-Type: application/json' \ + --header 'Accept: application/json' \ + --header 'X-Tyk-Authorization: {your-secret}' \ + --data "$(wget -qO- https://bit.ly/3nt8KDa)" + ``` + + If the command succeeds, you will see: + ```json + { + "key": "trevorblades", + "status": "ok", + "action": "added" + } + ``` + + **What did we just do?** + + We just sent an API definition to the Tyk `/apis` endpoint. API definitions are discussed in detail in the API section of this documentation. These objects encapsulate all of the settings for an API within Tyk Gateway. + + + +Notice that when creating a GQL API you need to include your GQL service schema in the API definition. Tyk Gateway doesn't have the capacity to introspect your GQL service on its own. + +Including the correct schema allows Tyk Gateway to validate incoming requests against it. More on validation can be found [here](/api-management/graphql#validation) + + + + **Restart or hot reload** + + After generating the file, you must either restart the Gateway or initiate a hot reload through an API call to the gateway, as outlined below: + ```curl + curl -H "x-tyk-authorization: {your-secret}" -s http://{your-tyk-host}:{port}/tyk/reload/group + ``` + + This command will hot-reload your API Gateway(s) and the new GQL API will be loaded, if you take a look at the output of the Gateway (or the logs), you will see that it should have loaded [Trevorblades API](https://countries.trevorblades.com/) on `/trevorblades/`. + + Your GraphQL API is now ready to use. We recommend securing any GraphQL API before publishing it. 
+ + Check the following docs for more on GraphQL-specific security options: + * [Field based permissions](/api-management/graphql#field-based-permissions) + * [Complexity limiting](/api-management/graphql#complexity-limiting-1) + * [Introspection](/api-management/graphql#introspection) + +## GraphQL Proxy Only + +### What is GraphQL Proxy Only + +GraphQL Proxy Only is a GraphQL API with a single data source and a read-only schema. The schema is automatically loaded from the GraphQL upstream, which must support introspection queries. +Like other APIs, the GraphQL API supports policies, but with more advanced settings. + +### Creating a GraphQL API via the Dashboard UI + +1. Log in to the Dashboard and go to APIs > Add New API > GraphQL. + +Creating GraphQL Proxy Only API + +2. Choose a name for your API and provide an upstream URL + + + + + In case your upstream URL is protected, select **Upstream Protected** and provide authorization details (either Header or Certificate information). + + + +3. In this case, the upstream is protected with Basic Authentication, so we add an Authorization header. + + + + + **Persist headers for future use** checkbox is selected. That way, you will not need to provide the auth headers anymore as they will be persisted in the API definition. + + + +Adding Auth Header for GraphQL Proxy Only API + + +4. Once done, click **Configure API**, and the Dashboard API designer will show up. + +5. Configure your API and click **save**, Your API will now be saved. + +### Managing GQL Schema + +There can be a need to update/sync the schema on your GraphQL API, say when the schema on the upstream is updated. +The Dashboard UI can show the last time your API schema was synced with the upstream schema. + +schema last updated screenshot + +If you click the **Get latest version**, the gateway will make an introspection query to your upstream to fetch the schema. +You need to click **Update** on the top right button, to update your API. 
+ + + +If your upstream is protected, you will need to provide an Authorization Header. In the Dashboard go to your API > Advanced Options > Upstream Auth headers +and fill in your credentials + + + +### Policies, Keys, and Developer Portal + +#### Field-based permission + +You may want to allow different consumers access to your GraphQL API without exposing all data to them. So for example this could be a schema for a GraphQL API: +```graphql +type Query { + accounts: [Account!] +} + +type Account { + owner: String! + number: ID! + balance: Float! +} +``` + +and if you don't want a consumer associated with a certain key to access the `balance` field on type `Account`, the gateway will respond with: +```json +{ + "errors": [ + { + "message": "field: balance is restricted on type: Account" + } + ] +} +``` +Check the [Setup field-based permission](/api-management/graphql#setup-field-based-permissions-in-dashboard) section, to learn how to configure them. + + +#### Complexity Limiting + +The complexity of a GraphQL query is about its depth. Check out this query: +```graphql +{ + continents { + countries { + continent { + countries { + continent { + countries { + name + } + } + } + } + } + } +} +``` + +The above query has a depth of seven, since there are seven levels of nested queries. + +Tyk offers a solution to limit the depth of a query. +Check out [this link](/api-management/graphql#query-depth-limit) on how to set query depth. + +#### Developer Portal + +As of Tyk v3.0.0, you can now publish GraphQL APIs to the Tyk Developer Portal. +[This section](/tyk-developer-portal/tyk-portal-classic/graphql) will show how you can expose a GraphQL API to the developer portal. + +## Introspection + +### Overview + +A GraphQL server can provide information about its schema. This functionality is called **introspection** and is achievable by sending an **introspection query** to the GraphQL server.
+ +If **introspection** is a completely new concept for you, browse through the official [GraphQL Specification](https://spec.graphql.org/October2021/#sec-Introspection) published by the GraphQL Foundation to find out more. + +When [creating a GraphQL proxy](/api-management/graphql#create-a-graphql-api) in Tyk Dashboard an introspection query is used to fetch the schema from the GraphQL upstream and display it in the schema tab. + + + +When using a GraphQL proxy the introspection query is always sent to the GraphQL upstream. This means that changes in the Tyk schema won't be reflected in the introspection response. You should keep the schemas synchronised to avoid confusion. + + + +#### Introspection for protected upstreams + +When you are creating a GQL API using Tyk Dashboard and your target GQL API is protected, you need to provide authorization details, so that Tyk Gateway can obtain your schema. + +In the *Create new API* screen you have to tick the **Upstream Protected** option under your Upstream URL. + + Upstream protected + + - From the **Upstream protected by** section choose the right option for your case: Headers or Certificate. + - Choosing **Headers** will allow you to add multiple key/value pairs in the *Introspection headers* section. + - You can also **Persist headers for future use** by ticking that option. This will save the information you provided in case in the future your schema changes and you need to sync it again. To understand better where this information will be saved, go to [GQL Headers](/api-management/graphql#graphql-apis-headers). To read more about schema syncing go [here](/api-management/graphql#syncing-gql-schema). +- Choosing **Certificate** will allow you to provide *Domain* details and either *Select certificate* or *Enter certificate ID*. + +#### Turning off introspection + +The introspection feature should primarily be used as a discovery and diagnostic tool for development purposes.
+ +Problems with introspection in production: + +* It may reveal sensitive information about the GraphQL API and its implementation details. +* An attacker can discover potentially malicious operations. + +You should note that if the *Authentication Mode* is *Open(Keyless)*, GraphQL introspection is enabled and it cannot be turned off. + +GraphQL introspection is enabled in Tyk by default. You can disable the introspection per key or security policy using: +* Tyk Dashboard +* Tyk Dashboard and Gateway API + + + + +First, check the general information on [how to create a security policy with Tyk](/api-management/gateway-config-managing-classic#secure-an-api) + +For GraphQL APIs the *API ACCESS* section will show additional, GQL-specific options that can be enabled. + +Disable introspection + +You can disable introspection by changing the switch position. + +Because introspection control in Tyk works on Policy and Key level, it means you can control each of your consumer's access to introspection. You can have keys that allow introspection, while also having keys that disallow it. + + + + +First, you need to learn [how to create a security policy with Tyk API](/api-management/gateway-config-managing-classic#secure-an-api) or [how to create an API Key with Tyk API](/api-management/policies#access-key-level-security). + +Once you learn how to utilize the API to create a security policy or a key, you can use the following snippet: + +```bash +{ + "access_rights": { + "{API-ID}": { + "api_id": "{API-ID}", + "api_name": "{API-NAME}", + "disable_introspection": true, + "allowed_types": [], + "restricted_types": [] + } + } +} +``` + +With this configuration, we set the `disable_introspection` field to `true`.
When you try to run an introspection query on your API, you will receive an error response *(403 Forbidden)*: + +```bash +{ + "error": "introspection is disabled" +} +``` + + + + + + +Introspection also works for the [Universal Data Graph](/api-management/data-graph#overview). + +### Introspection Queries + +Any GraphQL API can be introspected with the right introspection query. Here's some examples on what introspection queries can look like and what information you can learn about the GraphQL service using them. + +#### Introspecting all types + +This query will respond with information about all types and queries defined in the schema. Additional information like *name*, *description* and *kind* will also be provided. + +```graphql +query { + __schema { + types { + name + description + kind + } + queryType { + fields { + name + description + } + } + } + } + +``` + +#### Introspecting single type details + +If you want to know more about a certain type in the schema, you can use the following query: + +```graphql + query { + __type(name: "{type name}") { + ...FullType + } + } + + fragment FullType on __Type { + kind + name + description + fields(includeDeprecated: true) { + name + description + args { + ...InputValue + } + type { + ...TypeRef + } + isDeprecated + deprecationReason + } + + inputFields { + ...InputValue + } + + interfaces { + ...TypeRef + } + + enumValues(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + } + + possibleTypes { + ...TypeRef + } + } + + fragment InputValue on __InputValue { + name + description + type { + ...TypeRef + } + defaultValue + } + + fragment TypeRef on __Type { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + } + } + } + } + } + } + } + } +``` + +#### Introspecting types associated with an interface + +The query to introspect a single type can be used 
for any type, but you might prefer a simpler response for types such as `interface`. With this query you can get a list of objects that implement a specific `interface`. + +```graphql +query { +__type(name: "{interface name}") { + name + kind + description + possibleTypes { + name + kind + description + } +} +} +``` + +#### Introspecting ENUM values + +An `enum` type defines a set of discrete values. With this query you can get a complete list of those values for a chosen `enum`. + +```graphql +query { +__type(name: "{enum name}") { + name + kind + description + enumValues { + name + description + } +} +} +``` + +#### Introspecting query definitions + +GraphQL requires queries to be defined in a special type `Query` in the schema. You can use the below introspection query to find out more about the query operations of the graph. + +```graphql + query { + __type(name: "Query") { + ...QueryType + } + } + + fragment QueryType on __Type { + fields { + name + description + type { + name + kind + } + args { + name + description + type { + name + kind + } + } + } + } +``` + + + +You might find GQL APIs where the `Query` type is called `QueryRoot`. In those cases the above introspection query needs to be modified in line 2 to: `__type(name: "QueryRoot")` + + + +#### Introspecting mutation and subscription definitions + +You should use the same introspection query as you would for the `Query` type, just change the name argument to `Mutation` or `Subscription`.
+ +#### Full introspection + +If you prefer to introspect GraphQL all at once, you can do that by sending this query: + +```graphql + + query IntrospectionQuery { + __schema { + + queryType { name } + mutationType { name } + subscriptionType { name } + types { + ...FullType + } + directives { + name + description + + locations + args { + ...InputValue + } + } + } + } + + fragment FullType on __Type { + kind + name + description + + fields(includeDeprecated: true) { + name + description + args { + ...InputValue + } + type { + ...TypeRef + } + isDeprecated + deprecationReason + } + inputFields { + ...InputValue + } + interfaces { + ...TypeRef + } + enumValues(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + } + possibleTypes { + ...TypeRef + } + } + + fragment InputValue on __InputValue { + name + description + type { ...TypeRef } + defaultValue + + + } + + fragment TypeRef on __Type { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + } + } + } + } + } + } + } + } + +``` + +Tyk also allows you to block introspection queries for security reasons if you wish to do so. More information on how to do that is provided [here](/api-management/graphql#turning-off-introspection). + +## Validation + +In order to prevent errors happening during request processing or sending invalid queries to the upstream Tyk supports the validation of GraphQL queries and schemas. + +### Query Validation +Tyk's native GraphQL engine supports validating GraphQL queries based on the [GraphQL Specification](https://spec.graphql.org/October2021/). + +Both the GraphQL engine in front of your existing GraphQL API as well as any Universal Data Graph you build gets protected with a validation middleware. + +This means, no invalid request will be forwarded to your upstream. 
+The Gateway will catch the error and return it to the client. + +### Schema Validation +A broken schema can lead to undesired behaviors of the API including queries not being processed by the GraphQL middleware. As the search for the root cause for +such a malfunction can be tedious, Tyk provides schema validation. + + + +Schema validation is only available when using the Dashboard or Dashboard API. + + + +The schema validation will prevent you from saving or updating an API with a broken schema. This includes schemas breaking the following rules: + - No duplicated operation types (Query, Mutation, Subscription) + - No duplicated type names + - No duplicated field names + - No duplicated enum values + - No usage of unknown types + +When using the [Dashboard API](/api-management/dashboard-configuration#manage-apis-api-definition) the response for a broken schema will be a *400 Bad Request* with a body containing the validation errors. For example: + +```json +{ + "Status": "Error", + "Message": "Invalid GraphQL schema", + "Meta": null, + "Errors": [ + "field 'Query.foo' can only be defined once" + ] +} +``` + +## GraphQL APIs headers + +Users can set up two kinds of headers when configuring GraphQL APIs: + +- Introspection headers +- Request headers + +Both types of headers can be set in the Advanced Options tab in Tyk Dashboard. + +### Introspection headers + +Tyk Dashboard can introspect any upstream GraphQL API and download a copy of the GQL schema. That schema will be displayed in the Schema tab. + +For protected upstreams that require authorization for introspection, Tyk allows you to persist authorization headers within the GraphQL API configuration using **Introspection headers**. + +Introspection headers + +Any header key/value pair defined in **Introspection headers** will only be used while making an introspection call from Tyk Dashboard to the upstream. Those headers will not be used while proxying requests from consumers to the upstream. 
+ +**Introspection headers** can also be configured in the raw API definition: + +```json +... +"graphql": { + "execution_mode": "proxyOnly", + "proxy": { + "auth_headers": { + "admin-auth": "token-value" + } + } +} +``` + +### Request headers + +You can enrich any GraphQL request proxied through Tyk Gateway with additional information in the headers by configuring **Request headers** in the Tyk Dashboard. + +Request headers + +**Request headers** values can be defined as context variables. To know how to refer to request context variables check [this page](/api-management/traffic-transformation/request-context-variables). + +Any header key/value pair defined in **Request headers** will only be used to inject headers into requests proxied through the Gateway. It will not be used to introspect the upstream schema from Tyk Dashboard. + +**Request headers** can also be configured in the raw API definition: + +```bash +... +"graphql": { + "execution_mode": "proxyOnly", + "proxy": { + "request_headers": { + "context-vars-metadata": "$tyk_context.path", + "static-metadata": "static-value" + } + } +} +``` + +## Syncing GQL Schema + +A GraphQL Proxy API maintains a copy of the upstream GraphQL schema. When the upstream schema changes, these updates need to be reflected in the proxy schema. + +To manage this, Tyk Dashboard stores the timestamp of the last schema change each time a GraphQL API is updated. This timestamp helps identify whether the schema is outdated and needs to be synced with the upstream version. You can find this information above the schema editor. + +To sync the schema, click the **Resync** button. + + + + +Syncing schemas is only available for proxy-only GraphQL APIs and **not** for UDG. + + + +Sync Schema Button + +If your upstream is protected then you need to make sure you provide Tyk with the authorization details to execute the introspection query correctly. 
You can add those detail while [creating GQL API](/api-management/graphql#introspection-for-protected-upstreams) or using [Introspection headers](/api-management/graphql#introspection-headers) later on. + + + +## Persisting GraphQL queries + +Tyk Gateway `4.3.0` release includes a way to expose GraphQL queries as REST endpoints. For now, this can only be configured via the raw API definition, Tyk Dashboard support is coming soon. + +### How to persist GraphQL query + +The ability to expose a GraphQL query as a REST endpoint can be enabled by adding the `persist_graphql` section of the `extended_paths` on an HTTP type in any API version you intend to use to serve as the GraphQL query to REST endpoint proxy. + +Here is a sample REST API proxy for the HTTP type API: + +```json +{ + "name": "Persisted Query API", + "api_id": "trevorblades", + "org_id": "default", + "use_keyless": true, + "enable_context_vars": true, + "definition": { + "location": "header", + "key": "x-api-version" + }, + "proxy": { + "listen_path": "/trevorblades/", + "target_url": "https://countries.trevorblades.com", + "strip_listen_path": true + } +} +``` + +The target URL should point to a GraphQL upstream although this is a REST proxy. This is important for the feature to work. + +#### Adding versions + +On its own, this isn’t particularly remarkable. To enable GraphQL to REST middleware, modify the Default version like so: + +```json +{ + "name": "Persisted Query API", + "definition": { + "location": "header", + "key": "x-api-version" + }, + ... 
+
+  "version_data": {
+    "not_versioned": true,
+    "default_version": "",
+    "versions": {
+      "Default": {
+        "name": "Default",
+        "expires": "",
+        "paths": {
+          "ignored": [],
+          "white_list": [],
+          "black_list": []
+        },
+        "use_extended_paths": true,
+        "global_headers": {},
+        "global_headers_remove": [],
+        "global_response_headers": {},
+        "global_response_headers_remove": [],
+        "ignore_endpoint_case": false,
+        "global_size_limit": 0,
+        "override_target": "",
+        "extended_paths": {
+          "persist_graphql": [
+            {
+              "method": "GET",
+              "path": "/getContinentByCode",
+              "operation": "query ($continentCode: ID!) {\n continent(code: $continentCode) {\n code\n name\n countries {\n name\n }\n }\n}",
+              "variables": {
+                "continentCode": "EU"
+              }
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+```
+
+The vital part of this is the `extended_paths.persist_graphql` field. The `persist_graphql` object consists of four fields:
+
+`method`: The HTTP method used to access that endpoint, in this example, any GET requests to `/getContinentByCode` will be handled by the *persist graphql* middleware.
+
+`path`: The path the middleware listens to.
+
+`operation`: This is the GraphQL operation (`query` in this case) that is sent to the upstream.
+
+`variables`: A list of variables that should be included in the upstream request.
+
+If you run a request to your proxy, you should get a response similar to this:
+
+```json
+{
+  "data": {
+    "continent": {
+      "code": "EU",
+      "name": "Europe",
+      "countries": [
+        {
+          "name": "Andorra"
+        },
+        ...
+      ]
+    }
+  }
+}
+```
+
+#### Dynamic variables
+
+We have seen support for passing static variable values via the API definition, but there will be cases where we want to extract variables from the request header or URL. 
More information about available request context variables in Tyk can be found [here](/api-management/traffic-transformation/request-context-variables).
+
+Below is an example of using an incoming `code` header value as a variable in `persist_graphql` middleware configuration:
+
+```json
+{
+  "method": "GET",
+  "path": "/getCountryByCode",
+  "operation": "query ($countryCode: ID!) {\n country(code: $countryCode) {\n code\n name\n }\n}",
+  "variables": {
+    "countryCode": "$tyk_context.headers_Code"
+  }
+}
+```
+
+Making a request to that endpoint and providing header `"code": "UK"` should result in a response similar to this:
+
+```json
+{
+  "data": {
+    "country": {
+      "code": "UK",
+      "name": "United Kingdom"
+    }
+  }
+}
+```
+
+Similarly, you can also pass variables in the request URL. Modify your `persist_graphql` block to this:
+
+```json
+{
+  "method": "GET",
+  "path": "/getCountryByCode/{countryCode}",
+  "operation": "query ($countryCode: ID!) {\n country(code: $countryCode) {\n code\n name\n }\n}",
+  "variables": {
+    "countryCode": "$path.countryCode"
+  }
+}
+```
+
+If you now make a request to `/getCountryByCode/NG` you should get a result similar to this:
+
+```json
+{
+  "data": {
+    "country": {
+      "code": "NG",
+      "name": "Nigeria"
+    }
+  }
+}
+```
+
+## Complexity Limiting
+
+Depending on the GraphQL schema an operation can cause heavy loads on the upstream by using deeply nested or resource-expensive operations. Tyk offers a solution to this issue by allowing you to control query depth and define its max value in a policy or directly on a key.
+
+### Deeply nested query
+
+Even if you have a simple GraphQL schema that looks like this:
+
+```graphql
+type Query {
+  continents: [Continent!]!
+}
+
+type Continent {
+  name: String!
+  countries: [Country!]!
+}
+
+type Country {
+  name: String!
+  continent: Continent!
+}
+```
+
+There is a potential risk that a consumer will try to send a deeply nested query that will put a lot of load on your upstream service. 
An example of such query could be: + +```graphql +query { + continents { + countries { + continent { + countries { + continent { + countries { + continent { + countries { + name + } + } + } + } + } + } + } + } +} +``` + +### Query depth limit +Deeply nested queries can be limited by setting a query depth limitation. The depth of a query is defined by the highest amount of nested selection sets in a query. + +Example for a query depth of `2`: +```json +{ + continents { + name + } +} +``` + +Example for a query depth of `3`: +```json +{ + continents { + countries { + name + } + } +} +``` + +When a GraphQL operation exceeds the query depth limit the consumer will receive an error response (*403 Forbidden*): +```json +{ + "error": "depth limit exceeded" +} +``` + +#### Enable depth limits from the Dashboard + +Query depth limitation can be applied on three different levels: + +* **Key/Policy global limits and quota section. (`Global Limits and Quota`)** The query depth value will be applied on all APIs attached on a Key/Policy. + 1. *Optional:* Configure a Policy from **System Management > Policies > Add Policy**. + 2. From **System Management > Keys > Add Key** select a policy or configure directly for the key. + 3. Select your GraphQL API (marked as *GraphQL*). (if Policy is not applied on Key) + 4. Change the value for **Query depth**, from `Global Limits and Quota` by unchecking the *Unlimited query depth* checkmark and insert the maximum allowed query depth. + +query-depth-limit + +* **API limits and quota. (`Set per API Limits and Quota`)** This value will overwrite any value registered for query depth limitation on global Key/Policy level, and will be applied on all fields for Query and Mutation types defined within the API schema. + 1. *Optional:* Configure a Policy from **System Management > Policies > Add Policy**. + 2. From **System Management > Keys > Add Key** select a policy or configure directly for the key. + 3. 
Select your GraphQL API (marked as *GraphQL*). (if Policy is not applied on Key)
+  4. Enable `Set per API Limits and Quota` section.
+  5. Change the value for **Query depth**, from API level, by unchecking the *Unlimited query depth* checkmark and insert the maximum allowed query depth.
+
+query-depth-limit
+
+* **API per query depth limit. (`Set per query depth limits`)** Setting a query depth limit value on a specific Query/Mutation type field takes the highest priority, and all values set in the first 2 steps will be overwritten.
+  1. *Optional:* Configure a Policy from **System Management > Policies > Add Policy**.
+  2. From **System Management > Keys > Add Key** select a policy or configure directly for the key.
+  3. Select your GraphQL API (marked as *GraphQL*). (if Policy is not applied on Key)
+  4. Enable `Set per query depth limits` section.
+  5. Add as many queries as you want to apply depth limitation on.
+
+query-depth-limit
+
+
+#### Enable depth limits using Tyk APIs
+
+You can set the same query depth limits using the Tyk Gateway API (for open-source users) or Tyk Dashboard API. To make it easier we have [Postman collections](https://www.postman.com/tyk-technologies/workspace/tyk-public-workspace/overview) you can use.
+
+**Global query depth limit for Key/Policy**
+
+In the key/policy json you need to make sure this section has your desired `max_query_depth` set:
+
+```json
+{...
+  "rate": 1000,
+  "per": 60,
+  "max_query_depth": 5
+...}
+```
+
+**Per API depth limits**
+
+In the key/policy json you need to make sure that this section is set correctly:
+
+```json
+{
+  ... 
+ "access_rights_array": [ + { + "api_name": "trevorblades", + "api_id": "68496692ef5a4cb35a2eac907ec1c1d5", + "versions": [ + "Default" + ], + "allowed_urls": [], + "restricted_types": [], + "allowed_types": [], + "disable_introspection": false, + "limit": { + "rate": 1000, + "per": 60, + "throttle_interval": -1, + "throttle_retry_limit": -1, + "max_query_depth": 3, + "quota_max": -1, + "quota_renews": 0, + "quota_remaining": 0, + "quota_renewal_rate": -1, + "set_by_policy": false + }, + "field_access_rights": [], + "allowance_scope": "" + } + ] + ... +} +``` + +**API per query depth limits** + +If you have more than one query in your schema and you want to set different depth limits for each of those, Tyk also allows you to do that. In this case you need to make sure, that `field_access_rights` per API are set correctly: + +```yaml +{ + ... + "access_rights_array": [ + { + "api_name": "trevorblades", + "api_id": "68496692ef5a4cb35a2eac907ec1c1d5", + "versions": [ + "Default" + ], + "allowed_urls": [], + "restricted_types": [], + "allowed_types": [], + "disable_introspection": false, + "limit": null, + "field_access_rights": [ + { + "type_name": "Query", + "field_name": "continents", + "limits": { + "max_query_depth": 3 + } + }, + { + "type_name": "Query", + "field_name": "countries", + "limits": { + "max_query_depth": 5 + } + } + ], + "allowance_scope":"" + } + ] + ... +} +``` + + + +Setting the depth limit to `-1` in any of the above examples will allow *Unlimited* query depth for your consumers. + + + +## Field Based Permissions + +You may want to allow different consumers access to your GraphQL API without exposing all data to them. So for example this could be a schema for a GraphQL API: + +```graphql +type Query { + accounts: [Account!] +} + +type Account { + owner: String! + number: ID! + balance: Float! 
+}
+```
+
+For one type of consumer, it will be fine to query all data the schema exposes, while for another type of consumer it should not be allowed to retrieve the `balance` for example.
+
+Field access can be restricted by setting up *field based permissions* in a policy or directly on a key.
+
+When a field is restricted and used in a GraphQL operation, the consumer will receive an error response (*400 Bad Request*):
+
+```json
+{
+  "errors": [
+    {
+      "message": "field: balance is restricted on type: Account"
+    }
+  ]
+}
+```
+### Field based permissions with the list of allowed types
+Field access can be restricted by setting up an allowed types list in a policy or directly on a key. If new fields are added to the GraphQL schema, you don't need to update the field-based permissions. This is because the fields that are not in the list of allowed types are automatically access-restricted.
+
+First, you need to learn [how to create a security policy with the API](/api-management/gateway-config-managing-classic#secure-an-api) or [how to create an API Key with the API](/api-management/gateway-config-managing-classic#secure-an-api).
+
+Once you learn how to utilize the API to create a security policy or key, you can use the following snippet:
+
+```json
+{
+  "access_rights": {
+    "{API-ID}": {
+      "api_id": "{API-ID}",
+      "api_name": "{API-NAME}",
+      "allowed_types": [
+        {
+          "name": "Query",
+          "fields": ["accounts"]
+        },
+        {
+          "name": "Account",
+          "fields": ["owner"]
+        }
+      ]
+    }
+  }
+}
+```
+With this configuration, a consumer can only access the field called `owner`. When any other fields are used in a GraphQL operation, the consumer will receive an error response *(400 Bad Request)*:
+
+```json
+{
+  "errors": [
+    {
+      "message": "field: balance is restricted on type: Account"
+    }
+  ]
+}
+```
+It's important to note that once you set a list of allowed types, Tyk will use this list to control access rights and disable the list of restricted types. 
The same behavior will occur if an asterisk operator is used to control access. + +### Allow or restrict all fields with the asterisk operator + +You can allow or restrict all fields of a type by using an asterisk (*) operator. Any new fields of that type will be allowed or blocked by default. For example: + +```yaml +{ + "access_rights": { + "{API-ID}": { + "api_id": "{API-ID}", + "api_name": "{API-NAME}", + "allowed_types": [ + { + "name": "Query", + "fields": ["*"] + }, + { + "name": "Account", + "fields": ["*"] + } + ] + } + } +} +``` +With this configuration, the consumers are allowed to access all current and future fields of the `Query` and `Account` types. Please note that the asterisk operator does not work recursively. For example, in the example below, the asterisk operator only allows access to fields of the `Query` type. Fields of the `Account` type remain restricted. + +```yaml +{ + "access_rights": { + "{API-ID}": { + "api_id": "{API-ID}", + "api_name": "{API-NAME}", + "allowed_types": [ + { + "name": "Query", + "fields": ["*"] + } + ] + } + } +} +``` +The asterisk operator also works for the list of restricted types: + +```yaml +{ + "access_rights": { + "{API-ID}": { + "api_id": "{API-ID}", + "api_name": "{API-NAME}", + "restricted_types": [ + { + "name": "Query", + "fields": ["accounts"] + }, + { + "name": "Account", + "fields": ["*"] + } + ] + } + } +} +``` + +The configuration above restricts access to all fields of the `Account` type. + +Please note that the list of allowed types overrides the list of restricted types. + + + +### Setup field based permissions in Dashboard + +Restricted and allowed types and fields can also be set up via Tyk Dashboard. + +1. *Optional:* Configure a Policy from **System Management > Policies > Add Policy**. +2. From **System Management > Keys > Add Key** select a policy or configure directly for the key. +3. Select your GraphQL API (marked as *GraphQL*). +4. Enable either **Block list** or **Allow list**. 
By default, both are disabled. It's not possible to have both enabled at the same time - enabling one switch automatically disables the other. + +#### Block list + +By default all *Types* and *Fields* will be unchecked. By checking a *Type* or *Field* you will disallow to use it for any GraphQL operation associated with the key. + +For example, the settings illustrated below would block the following: +- `code` and `countries` fields in `Continent` type. +- `latt` and `longt` fields in `Coordinates` type. + +field-based-permissions + +#### Allow list + +By default all *Types* and *Fields* will be unchecked. By checking a *Type* or *Field* you will allow it to be used for any GraphQL operation associated with the key. + +For example, the settings illustrated below would only allow the following: +- `code` field in `Continent` type. +- `code` and `name` fields in `Language` type. + +Note that the `Query` type is unchecked, which indicates that all fields in `Query` type are unchecked. Subsequently, you will not be able to run any query. + +field-based-permissions + +## GraphQL Federation + +### Overview + +#### Federation Version Support + +Tyk supports Federation v1 + +#### What is federation? + +Ease-of-use is an important factor when adopting GraphQL either as a provider or a consumer. Modern enterprises have dozens of backend services and need a way to provide a unified interface for querying them. Building a single, monolithic GraphQL service is not the best option. It leads to a lot of dependencies, over-complication and is hard to maintain. + +To remedy this, Tyk, with release 4.0 offers GraphQL federation that allows you to divide GQL implementation across multiple back-end services, while still exposing them all as a single graph for the consumers. + +GraphQL federation flowchart + +#### Subgraphs and supergraphs + +**Subgraph** is a representation of a back-end service and defines a distinct GraphQL schema. 
It can be queried directly as a separate service or it can be federated into a larger schema of a supergraph. + +**Supergraph** is a composition of several subgraphs that allows the execution of a query across multiple services in the backend. + +#### Subgraphs examples + +**Users** +```graphql +extend type Query { + me: User +} + +type User @key(fields: "id") { + id: ID! + username: String! +} +``` + +**Products** + +```graphql +extend type Query { + topProducts(first: Int = 5): [Product] +} + +extend type Subscription { + updatedPrice: Product! + updateProductPrice(upc: String!): Product! + stock: [Product!] +} + +type Product @key(fields: "upc") { + upc: String! + name: String! + price: Int! + inStock: Int! +} +``` + +**Reviews** + +```graphql +type Review { + body: String! + author: User! @provides(fields: "username") + product: Product! +} + +extend type User @key(fields: "id") { + id: ID! @external + username: String! @external + reviews: [Review] +} + +extend type Product @key(fields: "upc") { + upc: String! @external + reviews: [Review] +} +``` + +#### Subgraph conventions + +- A subgraph can reference a type that is defined by a different subgraph. For example, the Review type defined in the last subgraph includes an `author` field with type `User`, which is defined in a different subgraph. + +- A subgraph can extend a type defined in another subgraph. For example, the Reviews subgraph extends the Product type by adding a `reviews` field to it. + +- A subgraph has to add a `@key` directive to an object’s type definition so that other subgraphs can reference or extend that type. The `@key` directive makes an object type an entity. +#### Supergraph schema + +After creating all the above subgraphs in Tyk, they can be federated in your Tyk Gateway into a single supergraph. The schema of that supergraph will look like this: + +```graphql +type Query { + topProducts(first: Int = 5): [Product] + me: User +} + +type Subscription { + updatedPrice: Product! 
+ updateProductPrice(upc: String!): Product! + stock: [Product!] +} + +type Review { + body: String! + author: User! + product: Product! +} + +type Product { + upc: String! + name: String! + price: Int! + inStock: Int! + reviews: [Review] +} + +type User { + id: ID! + username: String! + reviews: [Review] +} +``` + +#### Creating a subgraph via the Dashboard UI + +1. Log in to the Dashboard and go to APIs > Add New API > Federation > Subgraph. +Add federation subgraph + +2. Choose a name for the subgraph and provide an upstream URL. + + + + + Note + + In case your upstream URL is protected, select **Upstream Protected** and provide authorization details (either Header or Certificate information). + + + +Add upstream URL + +3. Go to Configure API and configure your subgraph just as you would any other API in Tyk. + + + + + Note + + In v4.0, subgraphs will be set to **Internal** by default. + + + +4. Once you have configured all the options, click Save. The subgraph is now visible in the list of APIs. +Subgraph API listing + +#### Creating a supergraph via the Dashboard UI +1. Log in to the Dashboard and go to APIs > Add New API > Federation > Supergraph. +Add supergraph API + +2. In the Details section, select all the subgraphs that will be included in your supergraph. +Select subgraphs + +3. Go to Configure API and configure your supergraph just as you would any other API in Tyk. +4. Once you configure all the options, click Save. The supergraph is now available in your list of APIs. +Supergraph API listing + +#### Defining Headers +In v4.0 you can define global (Supergraph) headers. Global headers are forwarded to all subgraphs that apply to the specific upstream request. + +##### Setting a Global Header + +1. After creating your supergraph, open the API in your Dashboard. +2. From the Subgraphs tab, click Global Headers. +Global Header setup for a supergraph + +3. Enter your header name and value. You can add more headers by clicking Add Headers. 
+Add further Global headers in a supergraph + +4. Click **Update** to save the header. +5. On the pop-up that is displayed, click Update API. +6. If you want to delete a global header, click the appropriate bin icon for it. +7. You can update your headers by repeating steps 2-5. + +### Entities + +#### Defining the base entity + +- Must be defined with the @key directive. +- The "fields" argument of the @key directive must reference a valid field that can uniquely identify the entity. +- Multiple primary keys are possible. + +An example is provided below: + +**Subgraph 1 (base entity)** + +```graphql +type MyEntity @key(fields: "id") @key(fields: "name") { + id: ID! + name: String! +} +``` + +#### Extending entities + +Entities cannot be shared types (be defined in more than one single subgraph; see **Entity stubs** below). + +The base entity remains unaware of fields added through extension; only the extension itself is aware of them. + +Attempting to extend a non-entity with an extension that includes the @key directive or attempting to extend a base entity with an extension that does not include the @key directive will both result in errors. + +The primary key reference should be listed as a field with the @external directive. + +Below is an example extension for **MyEntity** (which was defined above in **Subgraph 1**): + +**Subgraph 2 (extension):** + +```graphql +extend type MyEntity @key(fields: "id") { + id: ID! @external + newField: String! +} +``` + +#### Entity stubs +If one subgraph references a base entity (an entity defined in another subgraph) without adding new fields, that reference must be declared as a stub. In **federation v1**, stubs appear similar to extensions but do not add any new fields. + +An entity stub contains the minimal amount of information necessary to identify the entity (referencing exactly one of the primary keys from the base entity regardless of whether there are multiple primary keys on the base entity). 
+ +The identifying primary key should feature the @external directive. + +For example, a stub of **MyEntity** (which was defined above in **Subgraph 1**): + +**Subgraph 3 (stub):** + +```graphql +extend type MyEntity @key(fields: "id") { + id: ID! @external +} +``` + +##### What is a shared type? +Types that are identical by name and structure and feature in more than one subgraph are shared types. + +##### Can I extend a shared type? +Subgraphs are normalized before federation. This means you can extend a type if the resolution of the extension after normalization is exactly identical to the resolution of the type after normalization in other subgraphs. + +Unless the resolution of the extension in a single subgraph is exactly identical to all other subgraphs, extension is not possible. + +Here is a valid example where both subgraphs resolve to identical enums after normalization: + +**Subgraph 1:** + +```graphql +enum Example { + A, + B +} + +extend enum Example { + C +} +``` + +**Subgraph 2:** + +```graphql +enum Example { + A, + B, + C +} +``` + +Here, the enum named Example in **Subgraph 1** resolves to be identical to the enum named Example in **Subgraph 2**. + +However, if we were to include **Subgraph 3**, which does not feature the β€œC” value, the enum is no longer identical in all 3 subgraphs. Consequently, federation would fail. + +**Subgraph 3:** + +```graphql +enum Example { + A, + B +} +``` + + + +### Extension Orphans + +#### What is an extension orphan? + +An extension orphan is an unresolved extension of a type after federation has completed. This will cause federation to fail and produce an error. + +#### How could an extension orphan occur? + +You may extend a type within a subgraph where the base type (the original definition of that type) is in another subgraph. This means that it is only after the creation of the supergraph that it can be determined whether the extension was valid. 
If the extension was invalid or was otherwise unresolved, an "extension orphan" would remain in the supergraph.
+
+For example, the type named Person does not need to be defined in **Subgraph 1**, but it must be defined in exactly one subgraph (see **Shared Types**: extension of shared types is not possible, so extending a type that is defined in multiple subgraphs will produce an error).
+
+**Subgraph 1**
+
+```graphql
+extend type Person {
+  name: String!
+}
+```
+
+If the type named Person is not defined in exactly one subgraph, federation will fail and produce an error.
+
+
+
+## GraphQL WebSockets
+
+Tyk supports GraphQL via WebSockets using the protocols _graphql-transport-ws_ or _graphql-ws_ between client and Tyk Gateway.
+
+Before this feature can be used, WebSockets need to be enabled in the Tyk Gateway configuration. To enable it set [http_server_options.enable_websockets](/tyk-oss-gateway/configuration#http_server_optionsenable_websockets) to `true` in your `tyk.conf` file.
+
+
+
+You can find the full documentation of the _graphql-transport-ws_ protocol itself [here](https://github.com/enisdenjo/graphql-ws/tree/master).
+
+In order to upgrade the HTTP connection for a GraphQL API to WebSockets by using the _graphql-transport-ws_ protocol, the request should contain the following headers:
+
+```
+Connection: Upgrade
+Upgrade: websocket
+Sec-WebSocket-Key: 
+Sec-WebSocket-Version: 13
+Sec-WebSocket-Protocol: graphql-transport-ws
+```
+
+**Messages**
+
+The connection needs to be initialised before sending Queries, Mutations, or Subscriptions via WebSockets:
+
+```
+{ "type": "connection_init" }
+```
+
+Always send unique IDs for different Queries, Mutations, or Subscriptions.
+
+For Queries and Mutations, the Tyk Gateway will respond with a `complete` message, including the GraphQL response inside the payload. 
+ +```json +{ "id": "1", "type": "complete" } +``` + +For Subscriptions, the Tyk Gateway will respond with a stream of `next` messages containing the GraphQL response inside the payload until the data stream ends with a `complete` message. It can happen infinitely if desired. + + + +Be aware of those behaviors: + - If no `connection_init` message is sent after 15 seconds after opening, then the connection will be closed. + - If a duplicated ID is used, the connection will be closed. + - If an invalid message type is sent, the connection will be closed. + + + +**Examples** + +**Sending queries** + +``` +{"id":"1","type":"subscribe","payload":{"query":"{ hello }"}} +``` + +**Sending mutations** + +``` +{"id":"2","type":"subscribe","payload":{"query":"mutation SavePing { savePing }"}} +``` + +**Starting and stopping Subscriptions** + +``` +{"id":"3","type":"subscribe","payload":{"query":"subscription { countdown(from:10) }" }} +``` +``` +{"id":"3","type":"complete"} +``` + + +In order to upgrade the HTTP connection for a GraphQL API to WebSockets by using the _graphql-ws_ protocol, the request should contain following headers: + +``` +Connection: Upgrade +Upgrade: websocket +Sec-WebSocket-Key: +Sec-WebSocket-Version: 13 +Sec-WebSocket-Protocol: graphql-ws +``` + +**Messages** + +The connection needs to be initialised before sending Queries, Mutations, or Subscriptions via WebSockets: + +``` +{ "type": "connection_init" } +``` + +Always send unique IDs for different Queries, Mutations, or Subscriptions. + +For Queries and Mutations, the Tyk Gateway will respond with a `complete` message, including the GraphQL response inside the payload. + +For Subscriptions, the Tyk Gateway will respond with a stream of `data` messages containing the GraphQL response inside the payload until the data stream ends with a `complete` message. It can happen infinitely if desired. 
+ +**Examples** + +**Sending queries** + +``` +{"id":"1","type":"start","payload":{"query":"{ hello }"}} +``` + +**Sending mutations** + +``` +{"id":"2","type":"start","payload":{"query":"mutation SavePing { savePing }"}} +``` + +**Starting and stopping Subscriptions** + +``` +{"id":"3","type":"start","payload":{"query":"subscription { countdown(from:10) }" }} +``` +``` +{"id":"3","type":"stop"} +``` + + + +### Upstream connections + +For setting up upstream connections (between Tyk Gateway and Upstream) please refer to the [GraphQL Subscriptions Key Concept](/api-management/graphql#graphql-subscriptions). + + +## GraphQL Subscriptions + +Tyk **natively** supports also GraphQL subscriptions, so you can expose your full range of GQL operations using Tyk Gateway. Subscriptions support was added in `v4.0.0` in which *graphql-ws* protocol support was introduced. + +With the release of Tyk `v4.3.0` the number of supported subscription protocols has been extended. + +In Tyk subscriptions are using the [WebSocket transport](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API) for connections between the client and Gateway. For connections between Gateway and upstream WebSockets or [SSE](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) can be used. 
+
+### Supported transports and protocols
+
+| Transport     | Protocol                                                                                                                     |
+| :------------ | :-------------------------------------------------------------------------------------------------------------------------- |
+| WebSockets    | [graphql-ws](http://github.com/apollographql/subscriptions-transport-ws) (default, no longer maintained)                     |
+| WebSockets    | [graphql-transport-ws](http://github.com/enisdenjo/graphql-ws)                                                               |
+| HTTP          | [Server-Sent Events (SSE)](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)     |
+
+#### Setting up subscription types via API definition
+Subscription types or subscription transports/protocols are set inside the graphql section of the API definition.
+
+Depending on whether you want to configure GraphQL proxy-only, UDG, or GraphQL Federation there are different places for the configuration option.
+
+The values for subscription types are the same on all API types:
+- `graphql-ws`
+- `graphql-transport-ws`
+- `sse` (Server-Sent Events)
+
+##### HTTP method for Server-Sent Event subscription
+
+When using `subscription_type=sse`, Tyk will use the HTTP `GET` method to subscribe to the upstream service. For some use cases, for example, to support larger subscription payloads or to increase security by keeping the subscription payload out of server logs, the upstream requires HTTP `POST`. In Tyk 5.9.0, we have added `POST` support for SSE with the introduction of the boolean `sse_use_post` option, which is only relevant if `subscription_type=sse`.
+
+```json
+{
+  "graphql": {
+    "proxy": {
+      "subscription_type": "sse",
+      "sse_use_post": true
+    }
+  }
+}
+```
+
+If you need to use HTTP `GET` then you can omit `sse_use_post` or set it to `false`. 
+ + +##### GraphQL Proxy + +``` +{ + ..., + "graphql": { + ..., + "proxy": { + ..., + "subscription_type": "graphql-ws" + } + } +} +``` + +##### Universal Data Graph + +``` +{ + ..., + "graphql": { + ..., + "engine": { + ..., + "data_sources": [ + ..., + { + ..., + "subscription_type": "sse" + } + ] + } + } +} +``` + +##### Federation + +``` +{ + ..., + "graphql": { + ..., + "supergraph": { + ..., + "subgraphs": [ + ..., + { + ..., + "subscription_type": "graphql-transport-ws" + } + ] + } + } +} +``` + + + +If the upstream subscription GraphQL API is protected please enable the authentication via query params to pass the header through. + + + +There is no need to enable subscriptions separately. They are supported alongside GraphQL as a standard. The only requirement for subscriptions to work is to [enable WebSockets](/api-management/graphql#graphql-websockets) in your Tyk Gateway configuration file. + +Here's a general sequence diagram showing how subscriptions in Tyk work exactly: + +Tyk Subscriptions workflow + +## GraphQL playground + +When you are creating or editing your GraphQL API, any change you make can be tested using Tyk Dashboard built-in GraphiQL Playground. + +Playground + +At the top of the Playground itself, you can switch between Dark and Light theme using the `Set theme` dropdown. + +There's also a built in `Explorer` to help with query building and a `Prettify` button that helps to make the typed out operation easier to read. + +The GraphiQL try-out playground comes with a series of features by default, which can be very useful while configuring the API: + 1. Syntax highlighting. + 2. Intelligent type ahead of fields, arguments, types, and more. + 3. Real-time error highlighting and reporting for queries and variables. + 4. Automatic query and variables completion. + 5. Automatically adds required fields to queries. + 6. Documentation explorer, search, with markdown support. + 7. Query History using local storage + 8. 
Run and inspect query results using any promise that resolves JSON results.
+ 9. HTTPS or WSS not required.
+ 10. Supports full GraphQL Language Specification: Queries, Mutations, Subscriptions, Fragments, Unions, directives, multiple operations per query, etc.
+
+### GraphQL Playgrounds in Tyk
+
+Tyk offers you two types of Playgrounds, depending on who should be authorized to use them.
+
+* **Playground** tab in `API Designer`, that's only accessible via Tyk Dashboard and is always enabled. You need to log into the Tyk Dashboard to be able to use it.
+* **Public Playground** that you can enable for any GraphQL API and that is accessible for any consumer interacting with your GQL API. This playground will follow all security rules you set for your GQL API - authentication, authorization, etc.
+
+
+
+
+    The Public Playground relies on assets in the `playground` folder under [template_path](/tyk-oss-gateway/configuration#template_path) (default: `/opt/tyk-gateway/templates`).
+    If you change this path, be sure to copy the `playground` folder to the new location to preserve functionality.
+
+
+
+#### Enabling Public GraphQL Playground
+
+
+
+
+To enable a Public GraphQL Playground for one of your GQL APIs follow these few simple steps:
+
+1. Navigate to `Core Settings` tab in `API designer`.
+2. Change the setting in `Enable API Playground` section.
+3. Provide `Playground path`. By default, this path is set to `/playground` but you can change it.
+
+Headers
+
+Your `Public Playground` will be available at `http://{API-URL}/playground`.
+
+
+
+
+To enable Public GraphQL Playground using just Tyk API definition, you need to set the following:
+
+```json
+...
+"graphql": {
+    "playground": {
+      "enabled": true,
+      "path": "/playground"
+    }
+  }
+...
+```
+
+You can choose yourself the `path` name.
+
+Your `Public Playground` will be available at `http://{API-URL}/playground`. 
+ + + + +#### Query variables + +You can pass query variables in two different ways, both are fully supported in Tyk Dashboard. + +##### Using inline arguments in GraphiQL Playground + +A query or mutation string in this case, would be written like in the example below and there would be no other requirements for executing an operation like this: + +```graphql +mutation createUser { + createUser(input: { + username: "test", + email: "test@test.cz", + phone: "479332973", + firstName: "David", + lastName: "Test" + }) { + user { + id + username + email + phone + firstName + lastName + } + } +} +``` + +##### Using query variables in GraphiQL Playground + +For complex sets of variables, you might want to split the above example into two parts: GQL operation and variables. + +The operation itself would change to: + +```graphql +mutation createUser($input: CreateUserInput!) { + createUser(input: $input) { + user { + id + username + email + phone + firstName + lastName + } + } +} +``` + +The values for variables would need be provided in the `Query variables` section of the Playground like this: + +```graphql +{ + "input": { + "username": "test", + "email": "test@test.cz", + "phone": "479332973", + "firstName": "David", + "lastName": "Test" + } +} +``` + +#### Headers + +Debugging a GraphQL API might require additional headers to be passed to the requests while playing with the GraphiQL interface (i.e. `Authorization` header in case of Authentication Token protection over the API). This can be done using the dedicated headers tab in the Graphiql IDE. + +Headers + +You can also [forward headers](/api-management/graphql#graphql-apis-headers) from your client request to the upstream data sources. + + +#### Logs + + + +GraphQL request logs described below are **only available in Tyk Dashboard**. 
+ + + +Besides the results displayed in the GraphiQL playground, Tyk also provides you with a full list of logs of the triggered request, which can help a lot when debugging the API functionality. + +Logs + +The Request Logs can be seen under the playground itself. When no logs are present, there will be no option to expand the logs, and the filter buttons (top right) will be disabled: + +Logs Bar + +After creating and sending a query, the logs will automatically expand, and the filter buttons will display the number of logs for its respective level (category). + +Logs table + +##### Contents of the logs + +There are four levels (categories) of logs: `Info`, `Debug`, `Warning`, and `Error`, and each log belongs to one of these levels. + +The first column of the table displays the color-coded `"level"` property of the log. A log should never be absent of a level. The second column displays the log `"msg"` (message) property, if any. The third column displays the `"mw"` (middleware) property, if any. + +##### Expansion/collapse of Request Logs + +The Request Logs can be expanded or collapsed, using the chevron on the left side to toggle these states. + +##### Filter buttons and states + +Filter buttons have two states: active and inactive; the default of which is active. A solid background color of the button indicates that a filter is active. + +In the below picture, the `info` and `error` filter buttons are both active. If there are no logs for a particular level of log, the button will appear gray and disabled, as shown by the `Warning` filter button. + +Logs navigation + +Here's an example where there is at least one log, but all the filter buttons are in the inactive state. If the cursor (not shown) hovers over an inactive filter button, the button background will change to solid, and the tooltip will display `"Show"`. + +If all filter buttons are inactive, a message asking whether the user would like to reset all filters will display. 
Clicking this text will activate all available filters. + +Logs empty + +## Migrating to 3.2 + +As of 3.2 GraphQL schema for Tyk API definitions (i.e. `api_definition.graphql`) changed significantly, hence GraphQL API definitions created in previous beta versions are not supported via the UI and need to go through a manual migration. + + + +Before you continue, we strongly advise you to simply create a new API and avoid migration of the API definition. You'll achieve results faster and can avoid typos and errors that happen with the manual migration. + + + + + +Old API definitions will continue to work for the Tyk Gateway. + + + + +### The changes +- To improve performance, a single Data Source can now be used to link to multiple fields instead of having an independent data source for every field, hence `graphql.type_field_configurations` is now obsolete and new data sources can be defined under `graphql.engine.data_sources` (see example below). + +- Data Source kinds are `REST` or `GraphQL` regardless of your API being internal or not. + +- In case of internal APIs that are accessed via `tyk://` scheme, the `graphql.engine.data_sources[n].internal` property is set to true. + +- Each data source needs to be defined with a unique name `graphql.engine.data_sources[n].name`. + +- Each field connected to the data source is expected to be configured for mapping under `graphql.engine.field_configs` regardless of it requiring mapping or not. + +- It is important that all new GraphQL APIs have the version `graphql.version` property set to `2`. 
+ +### Examples + +#### Old Data Source Config + +```json +"type_field_configurations": [ + { + "type_name": "Query", + "field_name": "pet", + "mapping": { + "disabled": true, + "path": "" + }, + "data_source": { + "kind": "HTTPJSONDataSource", + "data_source_config": { + "url": "https://petstore.swagger.io/v2/pet/{{.arguments.id}}", + "method": "GET", + "body": "", + "headers": [], + "default_type_name": "Pet", + "status_code_type_name_mappings": [ + { + "status_code": 200, + "type_name": "" + } + ] + } + } + }, + { + "type_name": "Query", + "field_name": "countries", + "mapping": { + "disabled": false, + "path": "countries" + }, + "data_source": { + "kind": "GraphQLDataSource", + "data_source_config": { + "url": "https://countries.trevorblades.com", + "method": "POST" + } + } + }, +] +``` + +#### New Data Source Config + +```json +"engine": { + "field_configs": [ + { + "type_name": "Query", + "field_name": "pet", + "disable_default_mapping": true, + "path": [ + "" + ] + }, + { + "type_name": "Query", + "field_name": "countries", + "disable_default_mapping": false, + "path": [ + "countries" + ] + }, + ], + "data_sources": [ + { + "kind": "REST", + "name": "PetStore Data Source", + "internal": false, + "root_fields": [ + { + "type": "Query", + "fields": [ + "pet" + ] + } + ], + "config": { + "url": "https://petstore.swagger.io/v2/pet/{{.arguments.id}}", + "method": "GET", + "body": "", + "headers": {}, + } + }, + { + "kind": "GraphQL", + "name": "Countries Data Source", + "internal": false, + "root_fields": [ + { + "type": "Query", + "fields": [ + "countries" + ] + } + ], + "config": { + "url": "https://countries.trevorblades.com", + "method": "POST", + "body": "" + } + } + ] +}, +``` + +#### Example of new graphql definition + +``` json +"graphql" : { + "schema": "type Mutation {\n addPet(name: String, status: String): Pet\n}\n\ntype Pet {\n id: Int\n name: String\n status: String\n}\n\ntype Query {\n default: String\n}\n", + "enabled": true, + "engine": { + 
"field_configs": [ + { + "type_name": "Mutation", + "field_name": "addPet", + "disable_default_mapping": true, + "path": [""] + }, + { + "type_name": "Pet", + "field_name": "id", + "disable_default_mapping": true, + "path": [""] + }, + { + "type_name": "Query", + "field_name": "default", + "disable_default_mapping": false, + "path": ["default"] + } + ], + "data_sources": [ + { + "kind": "REST", + "name": "Petstore", + "internal": false, + "root_fields": [ + { + "type": "Mutation", + "fields": ["addPet"] + } + ], + "config": { + "url": "https://petstore.swagger.io/v2/pet", + "method": "POST", + "body": "{\n \"name\": \"{{ .arguments.name }}\",\n \"status\": \"{{ .arguments.status }}\"\n}", + "headers": { + "qa": "{{ .request.header.qa }}", + "test": "data" + }, + } + }, + { + "kind": "REST", + "name": "Local Data Source", + "internal": false, + "root_fields": [ + { + "type": "Pet", + "fields": ["id"] + } + ], + "config": { + "url": "http://localhost:90909/graphql", + "method": "HEAD", + "body": "", + "headers": {}, + } + }, + { + "kind": "GraphQL", + "name": "asd", + "internal": false, + "root_fields": [ + { + "type": "Query", + "fields": ["default"] + } + ], + "config": { + "url": "http://localhost:8200/{{.arguments.id}}", + "method": "POST", + } + } + ] + }, + "execution_mode": "executionEngine", + "version": "2", + "playground": { + "enabled": false, + "path": "" + }, + "last_schema_update": "2021-02-16T15:05:27.454+05:30" +} +``` + diff --git a/api-management/graphql/graphql-schema-types.mdx b/api-management/graphql/graphql-schema-types.mdx new file mode 100644 index 000000000..47489e40c --- /dev/null +++ b/api-management/graphql/graphql-schema-types.mdx @@ -0,0 +1,319 @@ +--- +title: "GraphQL Schema Types" +description: "Understanding GraphQL schema types and how they work with Tyk" +keywords: "GraphQL, Schema, Types, Custom Scalars" +sidebarTitle: "Schema Types" +--- + +## Introduction + +When working with GraphQL APIs in Tyk, understanding the different 
schema types is important for proper API design and implementation. This page covers the standard types supported by Tyk, custom scalar types, and best practices for type definitions. + +## Standard GraphQL Types + +Tyk supports all standard GraphQL types as defined in the [GraphQL specification](https://spec.graphql.org/October2021/): + +### Scalar Types + +Scalar types are the fundamental building blocks of your schema, representing actual data values. + +- `Int`: 32-bit integer +- `Float`: Double-precision floating-point value +- `String`: UTF-8 character sequence +- `Boolean`: `true` or `false` +- `ID`: Unique identifier, serialized as a String + +### Object Types + +Object types define collections of fields and are the most common type in GraphQL schemas. They model complex entities and can include fields of any type, enabling rich, nested data structures. + +```graphql +type User { + id: ID! + name: String! + age: Int + isActive: Boolean +} +``` + +### Interface Types + +Interfaces are abstract types that define a set of fields that implementing object types must include. + +```graphql +interface Node { + id: ID! +} + +type User implements Node { + id: ID! + name: String! + email: String +} + +type Product implements Node { + id: ID! + name: String! + price: Float! +} +``` + +### Union Types + +Unions represent an object that could be one of several object types, but don't share common fields like interfaces. + +```graphql +union SearchResult = User | Product | Article + +type Query { + search(term: String!): [SearchResult!]! +} +``` + +When querying a union, you need to use inline fragments: + +```graphql +{ + search(term: "example") { + ... on User { id name } + ... on Product { id price } + ... on Article { title content } + } +} +``` + +### Input Types + +Input types are special object types used specifically for arguments. They make complex operations more manageable by grouping related arguments, particularly useful for mutations. 
+ +```graphql +input UserInput { + name: String! + age: Int + email: String! +} +``` + +### Enum Types + +Enums restrict fields to specific allowed values, improving type safety and self-documentation in your API. + +```graphql +enum UserRole { + ADMIN + EDITOR + VIEWER +} +``` + +### List and Non-Null Types +GraphQL provides two type modifiers: + +- Non-Null (`!`): Indicates that the value cannot be null +- List (`[]`): Indicates that the value is an array of the specified type + +These modifiers can be combined: + +```graphql +type Collection { + requiredItemsRequired: [Item!]! # Non-null list of non-null items + optionalItemsRequired: [String!] # Nullable list of non-null items + requiredItemsOptional: [String]! # Non-null list of nullable items + optionalItemsOptional: [String] # Nullable list of nullable items + nestedRequiredItemsRequired: [[String!]!]! # nested non-nullable list in non-nullable list with non-null items +} +``` + +## Custom Scalar Types + +### Implementation in Tyk +Tyk supports custom scalar types through the underlying GraphQL engine. While Tyk passes custom scalar values through its system, the actual validation, parsing, and serialization of these values should be implemented in your upstream service. + +### Using the @specifiedBy Directive +The `@specifiedBy` directive allows you to provide a URL to the specification for a custom scalar type: + +```graphql +scalar DateTime @specifiedBy(url: "https://tools.ietf.org/html/rfc3339") +scalar UUID @specifiedBy(url: "https://tools.ietf.org/html/rfc4122") +``` + +### Common Custom Scalar Types + +#### JSON Scalar + +The JSON scalar handles arbitrary JSON data, useful for dynamic structures without defining every possible field. 
+ +```graphql +scalar JSON + +type Configuration { + settings: JSON +} +``` + +#### Long/BigInt +```graphql +scalar Long + +type Transaction { + amount: Long + timestamp: Long +} +``` + + + +**Note:** + +According to the [GraphQL spec](https://spec.graphql.org/), `Long/BigInt` values must be serialized as **strings** (IEEE standard). Some libraries incorrectly serialize them as numbers, which can lead to compatibility issues. + +Tyk's GraphQL engine expects `Long` values to be serialized as strings to ensure interoperability. + + + +**Example:** + +```json +{ + "amount": "9223372036854775807", + "timestamp": "1690991344000" +} +``` + +#### DateTime +```graphql +scalar DateTime + +type Event { + startTime: DateTime + endTime: DateTime +} +``` + +## GraphQL Federation Types + +Tyk supports [GraphQL Federation v1](/api-management/graphql#graphql-federation) for building unified APIs across multiple services. + +### Entity Types with @key + +The @key directive is fundamental to federation. It identifies fields that can be used to uniquely identify entities across services: + +```graphql +# In the Users service +type User @key(fields: "id") { + id: ID! + name: String! + email: String! +} + +# In the Orders service +type User @key(fields: "id") { + id: ID! + orders: [Order!]! +} +``` + +In this example: +- The `User` type is defined in both services +- The `@key` directive specifies that `id` is the field that uniquely identifies a User +- The Users service owns the core User fields (id, name, email) +- The Orders service extends User to add the orders field + +When a client queries for a User with their orders, Tyk's federation engine knows how to fetch the core User data from the Users service and the orders data from the Orders service, then combine them into a single response. 
+ +### Extended Types with @external + +The `@external` directive explicitly indicates that a type extends an entity defined in another service: + +```graphql +# In a service extending the User type +extend type User @key(fields: "id") { + id: ID! @external + reviews: [Review!]! +} +``` + +In this example: +- The `extend` keyword and `@external` directive indicate this is extending the User type +- The `@external` directive on the `id` field indicates this field is defined in another service +- This service adds the `reviews` field to the User type + +## Best Practices + +### Type Definition Best Practices + +1. **Use Non-Nullable Fields Wisely** + Consider future API evolution when deciding which fields should be non-nullable. + +2. **Consistent Naming Conventions** + Use PascalCase for type names, camelCase for field names, and ALL_CAPS for enum values. + +3. **Input Type Naming** + Name input types clearly to indicate their purpose (e.g., CreateUserInput, UpdateUserInput). + +4. **Scalar Type Usage** + Choose appropriate scalar types based on semantic meaning, not just data format. + +5. **Interface and Union Usage** + Use interfaces for shared fields and unions for different types that might be returned from the same field. + +### Limitations and Considerations + +1. **Custom Scalar Validation** + Ensure your upstream service properly validates custom scalar values. + +2. **Schema Evolution** + Start with nullable fields when unsure about requirements and use deprecation before removing fields. + +3. **Performance Considerations** + Limit nesting depth in types and consider pagination for list fields. + +## Type System Example + +```graphql +# Custom scalars +scalar DateTime @specifiedBy(url: "https://tools.ietf.org/html/rfc3339") +scalar JSON + +# Interfaces +interface Node { + id: ID! +} + +# Enums +enum Status { + ACTIVE + PENDING + INACTIVE +} + +# Input types +input ProductInput { + name: String! + description: String + price: Float! 
+ metadata: JSON +} + +# Object types +type Product implements Node { + id: ID! + name: String! + description: String + price: Float! + status: Status! + createdAt: DateTime! + metadata: JSON +} + +# Query and Mutation types +type Query { + getProduct(id: ID!): Product + listProducts(status: Status): [Product!]! +} + +type Mutation { + createProduct(input: ProductInput!): Product! + updateProduct(id: ID!, input: ProductInput!): Product! +} +``` \ No newline at end of file diff --git a/api-management/logs-metrics.mdx b/api-management/logs-metrics.mdx new file mode 100644 index 000000000..00bd8c379 --- /dev/null +++ b/api-management/logs-metrics.mdx @@ -0,0 +1,1662 @@ +--- +title: "API Observability - Configuring Logs and Metrics" +description: "Learn how to configure logs and metrics in Tyk for effective API observability, including integration with third-party tools." +keywords: "Metrics, Traces, Logs, System Logs, API Traffic, Opentelemetry, Datadog, Dynatrace, New Relic, Elastic Search, Jaeger, Monitoring, Observability" +sidebarTitle: "Metrics and Logs" +--- + +## Introduction + +API observability is the process of monitoring and analyzing APIs to gain insights into developer and end-user experience and to ensure the reliability of your system. + +You can achieve API observability by using a combination of telemetry signals such as traces, metrics, and logs. Each of these signals serves a specific purpose in monitoring and troubleshooting API issues: + +### Logs + +Logs provide detailed records of events and activities within the API processing and associated services. Logs are invaluable for debugging issues and understanding what happened at a specific point in time. Here's how you can use logs for API observability: + +- **Error Identification:** Use logs to identify errors, exceptions, and warning messages that indicate issues with the API's behavior. 
+ +- **Debugging:** Logs help developers troubleshoot and debug issues by providing detailed information about the sequence of events leading up to a problem. + +- **Security Monitoring:** Monitor logs for security-related events, such as authentication failures, access control violations and suspicious activities. + +- **Audit Trail:** Maintain an audit trail of important actions and changes to the API, including configuration changes, access control changes and data updates. + + +Tyk allows you to capture and analyze logs related to API requests and responses in the [Log Browser](/api-management/dashboard-configuration#activity-logs) . You can optionally enable detailed recording for the requests per API level or per Key level to store inbound request and outbound response data. You can [enable debug modes](/api-management/troubleshooting-debugging#capturing-detailed-logs) for selected APIs and send the detail logs to one or more Pump backend instances. + +To achieve comprehensive API observability, it is essential to integrate traces, metrics and logs into the observability tools that the team in charge of the APIs are already using. Those tools should allow users to query and visualize data, set up alerts and provide an intuitive interface for monitoring and troubleshooting API issues effectively. See also our 7 observability anti-pattern to avoid when working with APIs: [Bad API observability](https://tyk.io/blog/bad-api-observability/). + +### Metrics + +Metrics provide aggregated, quantitative data about the performance and behavior of an API over time. They offer insights into the overall health of the system. Here's how you can leverage metrics for API observability: + +- **Key Performance Indicators (KPIs):** Define and track essential metrics such as request rate, response time, error rate and resource utilization to monitor the overall health and performance of the API. 
+ +- **Custom Metrics:** Create custom metrics that are specific to your API's functionality or business objectives. For example, track the number of successful payments processed or the number of users signed up. + +- **Threshold Alerts:** Set up alerts based on predefined thresholds for metrics to receive notifications when API performance deviates from the expected norm. + +- **Trend Analysis:** Analyze metric trends over time to identify long-term performance patterns, plan for scaling and detect anomalies. + +Tyk Dashboard offers a [traffic analytics](/api-management/dashboard-configuration#traffic-analytics) function that provides insights into API usage, traffic patterns and response times. The built-in metrics allow you to track overall API traffic, detailed API analytics including: request count, response time distribution and error rates. API usage can be tracked on a per-client (per-key) basis. + +This analysis uses the [traffic logs](/api-management/logs-metrics#api-traffic-logs) generated by Tyk Gateway from API requests and responses. Tyk Pump is used to aggregate and transfer the logs to Tyk Dashboard's [aggregate analytics storage](/api-management/dashboard-configuration#data-storage-solutions). + +You can also use Tyk Pump to export those metrics to [different back-ends](/api-management/tyk-pump#external-data-stores). Here is an example of using Tyk Pump to send [API analytics metrics to Prometheus and Grafana](https://tyk.io/blog/service-level-objectives-for-your-apis-with-tyk-prometheus-and-grafana/). + +You can also leverage the OpenTelemetry spans exported from Tyk Gateway to calculate and export [span metrics](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/spanmetricsconnector/README.md) from the OpenTelemetry collector. + +### Distributed Tracing + +Distributed traces provide a detailed, end-to-end view of a single API request or transaction as it traverses through various services and components. 
Traces are crucial for understanding the flow of requests and identifying bottlenecks or latency issues. Here's how you can make use of traces for API observability: + +- **End-to-end request tracing:** Implement distributed tracing across your microservices architecture to track requests across different services and gather data about each service's contribution to the overall request latency. + +- **Transaction Flow:** Visualize the transaction flow by connecting traces to show how requests move through different services, including entry points (e.g., API gateway), middleware and backend services. + +- **Latency Analysis:** Analyze trace data to pinpoint which service or component is causing latency issues, allowing for quick identification and remediation of performance bottlenecks. + +- **Error Correlation:** Use traces to correlate errors across different services to understand the root cause of issues and track how errors propagate through the system. + + +Since v5.2, Tyk Gateway has supported the [OpenTelemetry](/api-management/logs-metrics#opentelemetry) standard for distributed tracing. You can configure Tyk to work with an [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) or integrate it with any [observability vendor supporting OpenTelemetry](https://opentelemetry.io/ecosystem/vendors/) to capture traces of API requests as they flow through Tyk Gateway and any upstream services. + +Explore our guides for [Datadog](/api-management/logs-metrics#datadog), [Dynatrace](/api-management/logs-metrics#dynatrace), [Jaeger](/api-management/logs-metrics#using-docker) and [New Relic](/api-management/logs-metrics#new-relic) for further info on how to integrate with 3rd party observability vendors. + +Tyk also supports the legacy [OpenTracing](/api-management/logs-metrics#opentracing-deprecated) approach (now deprecated), but we recommend users to adopt OpenTelemetry for a comprehensive, vendor-neutral technology with wide industry support. 
+ +## Logging + +Tyk Gateway generates two different types of logs for various operational aspects: + +- **System logs** capture internal gateway events, typically used for monitoring and debugging. +- **API traffic logs**, also known as transaction logs, record details of every request and response handled by the gateway and are stored in Redis. They are typically processed by Tyk Pump to create aggregated data that are then transferred to persistent storage. Tyk Pump can also be used to transfer the raw logs to 3rd Party analysis tools. + +While system logs focus on the gateway's internal operations and errors, API traffic logs provide insights into API usage, security events, and performance trends. Logging verbosity and format can be customized to suit different operational needs. + +### System Logs + +Tyk will log **system events** to `stderr` and `stdout`. + +In a typical installation, these will be handled or redirected by the service manager running the process, and depending on the Linux distribution, will either be output to `/var/log/` or `/var/log/upstart`. + +Tyk will try to output structured logs, and so will include context data around request errors where possible. + +[Custom logging event handlers](/api-management/gateway-events#logging-api-events-1) can be registered against **Gateway events** to customise the logs that are generated for those events. + +When contacting support, you may be asked to change the logging level as part of the support handling process. See [Support Information](/api-management/troubleshooting-debugging#support-information) for more details. 
+ +#### Log verbosity + +Tyk can generate system logs at four levels of verbosity: +- `error` is the most minimal level of logging, reporting only errors +- `warn` will log warnings and errors +- `info` logs errors, warnings and some additional information and is the default logging level +- `debug` generates a high volume of logs for maximum visibility of what Tyk is doing when you need to debug an issue + + + + + Debug log level generates a significant volume of data and is not recommended except when debugging. You can enable Debug mode reporting by adding the `--debug` flag to the process run command. + + + +You can set the logging verbosity for each Tyk Component using the appropriate `log_level` setting in its configuration file (or the equivalent environment variable). Note that there is no independent log level setting for Tyk Dashboard. + +| Tyk component | Config option | Environment variable | Default value if unset | +| :---------------- | :--------------- | :---------------------- | :-------------------------- | +| All components (except EDP) | | `TYK_LOGLEVEL` | `info` | +| [Tyk Gateway](/tyk-oss-gateway/configuration#log_level) | `log_level` | `TYK_GW_LOGLEVEL` | `info` | +| [Tyk Pump](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#log_level) | `log_level` | `TYK_PMP_LOGLEVEL` | `info` | +| [Tyk MDCB](/tyk-multi-data-centre/mdcb-configuration-options#log_level) | `log_level` | `TYK_MDCB_LOGLEVEL` | `info` | +| [Tyk Enterprise Developer Portal](/product-stack/tyk-enterprise-developer-portal/deploy/configuration#portal_log_level) | `logLevel` | `PORTAL_LOG_LEVEL` | `info` | + +For example, setting [TYK_GW_LOGLEVEL](/tyk-oss-gateway/configuration#log_level) environment variable to `debug` will enable verbose debug for the Gateway. + +Tyk support can advise you which level of verbosity to use for your deployment. 
+ +#### Log format (only available for the Gateway) + +As of Tyk Gateway `v5.6.0`, you can control the format in which logs will be generated - either `default` or `json` - using the `TYK_LOGFORMAT` environment variable. As a general performance tip, the `json` output format incurs less memory allocation overhead than the `default` format. For optimal performance, it's recommended to configure logging in the JSON format. + +This is an example of the `default` logging format: +``` +time="Sep 05 09:04:12" level=info msg="Tyk API Gateway v5.6.0" prefix=main +``` + +And an example of `json` logging format: +```json +{"level":"info","msg":"Tyk API Gateway v5.6.0","prefix":"main","time":"2024-09-05T09:01:23-04:00"} +``` + +#### Exporting Logs to Third-Party Tools + +Tyk can be configured to send log data to a range of 3rd party tools for aggregation and analysis. + +The following targets are supported: +- [Sentry](#sentry) +- [Logstash](#logstash) +- [Graylog](#graylog) +- [Syslog](#syslog) + +##### Sentry + +To enable Sentry as a log aggregator, update these settings in both your `tyk.conf` and your `tyk_analytics.conf`: + +* `use_sentry`: Set this to `true` to enable the Sentry logger; you must specify a Sentry DSN under `sentry_code`. + +* `sentry_code`: The Sentry-assigned DSN (a kind of URL endpoint) that Tyk can send log data to. + +##### Logstash + +To enable Logstash as a log aggregator, update these settings in your `tyk.conf`: + +* `use_logstash`: Set this to `true` to enable the Logstash logger. + +* `logstash_transport`: The Logstash transport to use, should be `"tcp"`. + +* `logstash_network_addr`: Set to the Logstash client network address, should be in the form of `hostname:port`. + +##### Graylog + +To enable Graylog as a log aggregator, update these settings in your `tyk.conf`: + +* `use_graylog`: Set this to `true` to enable the Graylog logger. + +* `graylog_network_addr`: The Graylog client address in the form of `hostname:port`. 
+ +##### Syslog + +To enable Syslog as a log aggregator, update these settings in your `tyk.conf`: + +* `use_syslog`: Set this to `true` to enable the Syslog logger. + +* `syslog_transport`: The Syslog transport to use, should be `"udp"` or empty. + +* `syslog_network_addr`: Set to the Syslog client network address, should be in the form of `hostname:port` + +### API Traffic Logs + +When a client makes a request to the Tyk Gateway, the details of the request and response are captured and stored in a temporary Redis list. In Tyk these transaction logs are also referred to as traffic analytics or simply analytics. This list is read (and then flushed) every 10 seconds by the [Tyk Pump](/api-management/tyk-pump). + +The Pump processes the records that it has read from Redis and forwards them to the required data sinks (e.g. databases or other tools) using the pumps configured in your system. You can set up multiple pumps and configure them to send different data to different sinks. The Mongo Aggregate and SQL Aggregate pumps perform aggregation of the raw analytics records before storing the aggregated statistics in the MongoDB or SQL database respectively. + +#### When to use API Traffic Logging + +1. **API usage trends** + + Monitoring the usage of your APIs is a key functionality provided by any API Management product. Traffic analytics give you visibility of specific and aggregated accesses to your services which you can monitor trends over time. You can identify popular and underused services which can assist with, for example, determining the demand profile for your services and thus appropriate sizing of the upstream capacity. + +2. **Security monitoring** + + Tracking requests made to security-critical endpoints, like those used for authentication or authorization, can help in identifying and mitigating potential security threats. Monitoring these endpoints for unusual activity patterns is a proactive security measure. + +3. 
**Development and testing** + + Enabling tracking during the development and testing phases can provide detailed insights into the API's behavior, facilitating bug identification and performance optimization. Adjustments to tracking settings can be made as the API transitions to production based on operational requirements. + +#### How API Traffic Logging Works + +API traffic logging must be enabled at the Gateway level in the startup configuration using the [enable_analytics](/tyk-oss-gateway/configuration#enable_analytics) field (or by setting the equivalent environment variable `TYK_GW_ENABLEANALYTICS`). + +The transaction records generated by the Gateway are stored in Redis, from which Tyk Pump can be configured to transfer them to the desired persistent storage. When using Tyk Dashboard, the [Aggregate Pump](/api-management/tyk-pump#tyk-dashboard) can be used to collate aggregated data that is presented in the [analytics](/api-management/dashboard-configuration#traffic-analytics) screens of the Tyk Dashboard. + +The Gateway will not, by default, include the request and response payloads in the transaction records. This minimizes the size of the records and also avoids logging any sensitive content. The [detailed recording](/api-management/logs-metrics#capturing-detailed-logs) option is provided if you need to capture the payloads in the records. + +You can suppress the generation of transaction records for any endpoint by enabling the [do-not-track middleware](/api-management/traffic-transformation/do-not-track) for that endpoint. This provides granular control over request tracking. + +You can find details of all the options available to you when configuring analytics in the Gateway in the [reference documentation](/tyk-oss-gateway/configuration#analytics_config). + + + +For the Tyk Dashboard's analytics functionality to work, you must configure both per-request and aggregated pumps for the database platform that you are using. 
For more details see the [Setup Dashboard Analytics](/api-management/tyk-pump#setup-dashboard-analytics) section. + + + +#### Capturing Detailed Logs + +The Gateway will not, by default, include the request and response payloads in traffic logs. This minimizes the size of the records and also minimises the risk of logging sensitive content. + +You can, however, configure Tyk to capture the payloads in the transaction records if required. This can be particularly useful during development and testing phases or when debugging an issue with an API. + +This is referred to as detailed recording and can be enabled at different levels of granularity. The order of precedence is: +1. [API level](/api-management/logs-metrics#configure-at-api-level) +2. [Key level](/api-management/logs-metrics#configure-at-key-level) +3. [Gateway level](/api-management/logs-metrics#configure-at-gateway-level) + +Consequently, Tyk will first check whether the API definition has detailed recording enabled to determine whether to log the request and response bodies. If it does not, then it will check the key being used in the request and finally it will check the Gateway configuration. + + + +Be aware that enabling detailed recording greatly increases the size of the records and will require significantly more storage space as Tyk will store the entire request and response in wire format. +
+
+Tyk Cloud users can enable detailed recording per-API following the instructions on this page or, if required at the Gateway level, via a support request. The traffic logs are subject to the subscription's storage quota and so we recommend that detailed logging only be enabled if absolutely necessary to avoid unnecessary costs. +
+ + +##### Configure at API level + +You can enable detailed recording for an individual API by setting the [server.detailedActivityLogs.enabled](/api-management/gateway-config-tyk-oas#detailedactivitylogs) flag within the Tyk Vendor Extension. + +In the Dashboard UI, you can configure detailed recording using the **Enable Detailed Activity Logs** option in the API Designer. + +Enabling detailed activity logs for a Tyk OAS API + +**Tyk Classic APIs** + +When working with Tyk Classic APIs, you should configure the equivalent `enable_detailed_recording` flag in the root of the API definition. + +In the Tyk Classic API Designer, the **Enable Detailed Logging** option can be found in **Core Settings**. + +Enabling detailed activity logs for a Tyk Classic API + +When using Tyk Operator with Tyk Classic APIs, you can enable detailed recording by setting `spec.enable_detailed_recording` to `true`, as in this example: + +```yaml {linenos=true, linenostart=1, hl_lines=["10-10"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + enable_detailed_recording: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +##### Configure at Key Level +An alternative approach to controlling detailed recording is to enable it only for specific [access keys](/api-management/policies#what-is-a-session-object). This is particularly useful for debugging purposes where you can configure detailed recording only for the key(s) that are reporting issues. + +You can enable detailed recording for a key simply by adding the following to the root of the key's JSON file: + +``` +"enable_detailed_recording": true +``` + + + +This will enable detailed recording only for API transactions where this key is used in the request. 
+ + + +##### Configure at Gateway Level +Detailed recording can be configured at the Gateway level, affecting all APIs deployed on the Gateway, by enabling the [detailed recording](/tyk-oss-gateway/configuration#analytics_configenable_detailed_recording) option in `tyk.conf`. + +```.json +{ + "enable_analytics" : true, + "analytics_config": { + "enable_detailed_recording": true + } +} +``` + +#### Enabling API Request Access Logs in Tyk Gateway + +As of Tyk Gateway `v5.8.0`, you can configure the Gateway to log individual API request transactions. To enable this feature, set the `TYK_GW_ACCESSLOGS_ENABLED` environment variable to `true`. + +##### Configuring output fields + +You can specify which fields are logged by configuring the `TYK_GW_ACCESSLOGS_TEMPLATE` environment variable. Below are the available values you can include: + +- `api_key`: Obfuscated or hashed API key used in the request. +- `client_ip`: IP address of the client making the request. +- `host`: Hostname of the request. +- `method`: HTTP method used in the request (e.g., GET, POST). +- `path`: URL path of the request. +- `protocol`: Protocol used in the request (e.g., HTTP/1.1). +- `remote_addr`: Remote address of the client. +- `upstream_addr`: Full upstream address including scheme, host, and path. +- `upstream_latency`: Roundtrip duration between the gateway sending the request to the upstream server and it receiving a response. +- `latency_total`: Total time taken for the request, including upstream latency and additional processing by the gateway. +- `user_agent`: User agent string from the client. +- `status`: HTTP response status code. + +To configure, set `TYK_GW_ACCESSLOGS_TEMPLATE` environment variable with the desired values in the format: `["value1", "value2", ...]`. 
+ +##### Default log example + +Configuration using `tyk.conf` + +```json +{ + "access_logs": { + "enabled": true + } +} +``` + +Configuration using environment variables: + +``` +TYK_GW_ACCESSLOGS_ENABLED=true +``` + +Output: + +``` +time="Jan 29 08:27:09" level=info api_id=b1a41c9a89984ffd7bb7d4e3c6844ded api_key=00000000 api_name=httpbin client_ip="::1" host="localhost:8080" latency_total=62 method=GET org_id=678e6771247d80fd2c435bf3 path=/get prefix=access-log protocol=HTTP/1.1 remote_addr="[::1]:63251" status=200 upstream_addr="http://httpbin.org/get" upstream_latency=61 user_agent=PostmanRuntime/7.43.0 +``` + +##### Custom template log example + +Configuration using `tyk.conf` + +```json +{ + "access_logs": { + "enabled": true, + "template": [ + "api_key", + "remote_addr", + "upstream_addr" + ] + } +} +``` + +Configuration using environment variables: + +``` +TYK_GW_ACCESSLOGS_ENABLED=true +TYK_GW_ACCESSLOGS_TEMPLATE="api_key,remote_addr,upstream_addr" +``` + +Output: + +``` +time="Jan 29 08:27:48" level=info api_id=b1a41c9a89984ffd7bb7d4e3c6844ded api_key=00000000 api_name=httpbin org_id=678e6771247d80fd2c435bf3 prefix=access-log remote_addr="[::1]:63270" upstream_addr="http://httpbin.org/get" +``` + +##### Performance Considerations + +Enabling access logs introduces some performance overhead: + +- **Latency:** Increases consistently by approximately 4%–13%, depending on CPU allocation and configuration. +- **Memory Usage:** Memory consumption increases by approximately 6%–7%. +- **Allocations:** The number of memory allocations increases by approximately 5%–6%. + + + + + While the overhead of enabling access logs is noticeable, the impact is relatively modest. These findings suggest the performance trade-off may be acceptable depending on the criticality of logging to your application. 
+ + + +#### Aggregated analytics + +The traffic logs that Tyk Gateway generates are stored in the local [Redis](/api-management/logs-metrics#how-api-traffic-logging-works) temporal storage. They must be transferred to a persistent data store (such as MongoDB or PostgreSQL) for use by analytics tools, typically using Tyk Pump. Tyk Pump can also generate aggregated statistics from these data using the dedicated [Mongo Aggregate](/api-management/tyk-pump#mongodb) and [SQL Aggregate](/api-management/tyk-pump#sql) pumps. These offload processing from Tyk Dashboard and reduce storage requirements compared with storing all of the raw logs. + +The aggregate pumps calculate statistics from the analytics records, aggregated by hour, for the following keys in the traffic logs: + +| Key | Analytics aggregated by | Dashboard screen | +| :---------------- | :---------------------------------- | :------------------------------------------------------------- | +| `APIID` | API proxy | [Activity by API](/api-management/dashboard-configuration#activity-by-api) | +| `TrackPath` | API endpoint | [Activity by endpoint](/api-management/dashboard-configuration#activity-by-endpoint) | +| `ResponseCode` | HTTP status code (success/error) | [Activity by errors](/api-management/dashboard-configuration#activity-by-error) | +| `APIVersion` | API version | n/a | +| `APIKey` | Client access key/token | [Activity by Key](/api-management/dashboard-configuration#activity-by-key) | +| `OauthID` | OAuth client (if OAuth used) | [Traffic per OAuth Client](/api-management/dashboard-configuration#activity-by-oauth-client) | +| `Geo` | Geographic location of client | [Activity by location](/api-management/dashboard-configuration#activity-by-location) | + +##### Custom aggregation keys + +Whereas Tyk Pump will automatically produce aggregated statistics for the keys in the previous section, you can also define custom aggregation keys using Tyk's custom analytics tag feature which identifies specific HTTP 
request headers to be used as aggregation keys. This has various uses, for example:
+
+- You need to record additional information from the request into the analytics but want to avoid [detailed logging](/api-management/logs-metrics#capturing-detailed-logs) due to the volume of traffic logs.
+- You wish to track a group of API requests, for example:
+  - Show me all API requests where `tenant-id=123`
+  - Show me all API requests where `user-group=abc`
+
+The Traffic Log middleware is applied to all endpoints in the API and so configuration is found in the `middleware.global` section of the Tyk Vendor Extension, within the `trafficLogs` section. Custom aggregation tags are specified as a list of HTTP headers in [middleware.global.trafficLogs.tagHeaders](/api-management/gateway-config-tyk-oas#trafficlogs) that Tyk should use for generation of custom aggregation tags for the API.
+
+For example if we include the header name `x-user-id` in the list of headers, then Tyk will create an aggregation key for each different value observed in that header. These aggregation keys will be given the name `<header name>-<header value>`, for example `x-user-id-1234` if the request contains the HTTP header `"x-user-id":1234`.
+
+**Tyk Classic APIs**
+
+If you are using Tyk Classic APIs, then the equivalent field in the API definition is [tag_headers](/api-management/gateway-config-tyk-classic#traffic-logs).
+
+In the Tyk Classic API Designer, the **Tag Headers** option can be found in **Advanced Options**.
+
+Tag Headers
+
+When using Tyk Operator with Tyk Classic APIs, you can configure custom analytics tags by listing the required header names in `spec.tag_headers`, as in this example:
+
+```yaml {linenos=true, linenostart=1, hl_lines=["10-12"]}
+apiVersion: tyk.tyk.io/v1alpha1
+kind: ApiDefinition
+metadata:
+  name: httpbin-tag-headers
+spec:
+  name: httpbin-tag-headers
+  use_keyless: true
+  protocol: http
+  active: true
+  tag_headers:
+    - Host
+    - User-Agent
+  proxy:
+    target_url: http://httpbin.org
+    listen_path: /httpbin-tag-headers
+    strip_listen_path: true
+```
+
+In this example we can see that the `Host` and `User-Agent` headers exist within the `tag_headers` array. For each incoming request Tyk will add `host-` and `user-agent-` tags to the list of tags in the traffic log.
+
+###### Suppressing generation of aggregates for custom keys
+
+If you don't want or need aggregated analytics for the headers you record with `tagHeaders`, you can configure Tyk Pump (or Tyk MDCB if it is performing the pump functionality) to discard those statistics when writing to the persistent analytics store.
+ +For both cases, you simply add the tags you want to ignore, or their prefixes, to the `ignore_tag_prefix_list` field in the appropriate configuration file or environment variable: + +- [Hybrid Pump config](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#pumpshybridmetaignore_tag_prefix_list) +- [Mongo Pump config](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#pumpsmongoaggregatemetaignore_tag_prefix_list) +- [Splunk Pump config](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#pumpssplunkmetaignore_tag_prefix_list) +- [SQL Pump config](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#pumpssqlaggregatemetaignore_tag_prefix_list) +- [MDCB config](/tyk-multi-data-centre/mdcb-configuration-options#ignore_tag_prefix_list) + + + + + If you add headers to the tags list that are unique to each request, such as a timestamp or unique request Id, then Tyk Gateway will essentially create an aggregation point _per request_ and the number of these tags in an hour will be equal to the number of requests. Since there's no real value in aggregating something that has a total of one, we recommend that you add such headers to the ignore list. + + + +## Metric Collection + +Metrics collection and analysis are key components of an Observability strategy, providing real-time insight into system behaviour and performance. + +Tyk Gateway, Pump and Dashboard have been instrumented for [StatsD](https://github.com/etsy/statsd) monitoring. + +Additionally, Tyk Gateway has also been instrumented for [New Relic](https://newrelic.com) metrics. + +### StatsD Instrumentation + +StatsD is a network daemon that listens for statistics, like counters and timers, sent over UDP or TCP and sends aggregates to one or more pluggable backend services. It's a simple yet powerful tool for collecting and aggregating application metrics. 
+
+#### Configuring StatsD instrumentation
+
+To enable instrumentation for StatsD, you must set the environment variable: `TYK_INSTRUMENTATION=1` and then configure the `statsd_connection_string` field for each component.
+
+`statsd_connection_string` is a formatted string that specifies how to connect to the StatsD server. It typically includes information such as the host address, port number, and sometimes additional configuration options.
+
+Optionally you can set `statsd_prefix` to a custom prefix value that will be applied to each metric generated by Tyk. For example, you can configure separate prefixes for your production and staging environments to make it easier to differentiate between the metrics in your analysis tool.
+
+#### StatsD Keys
+
+There are plenty of keys (metrics) available when you enable the StatsD instrumentation, but these are the basics:
+
+- API traffic handled by Gateway: `gauges.<prefix>.Load.rps` (requests per second)
+- Tyk Gateway API: `counters.<prefix>.SystemAPICall.called.count` (calls count) and `timers.<prefix>.SystemAPICall.success` (response time)
+- Tyk Dashboard API: `counters.<prefix>.SystemAPICall.SystemCallComplete.count` (requests count), `counters.<prefix>.DashSystemAPIError.*` (API error reporting)
+- Tyk Pump records: `counters.<prefix>.record.count` (number of records processed by pump)
+
+
+### New Relic Instrumentation
+
+Tyk Gateway has been instrumented for New Relic metrics since v2.5. Simply add the following config section to `tyk.conf` to enable the instrumentation and generation of data:
+
+```json
+{
+  "newrelic": {
+    "app_name": "",
+    "license_key": ""
+  }
+}
+```
+
+## OpenTelemetry
+
+Starting from Tyk Gateway version 5.2, you can leverage the power of [OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/), an open-source observability framework designed for cloud-native software. This enhances your API monitoring with end-to-end distributed tracing. 
At this time, Tyk does not support OpenTelemetry metrics or logging, but we have these on our roadmap for future enhancement of the product. + +This documentation will guide you through the process of enabling and configuring OpenTelemetry in Tyk Gateway. You'll also learn how to customize trace detail levels to meet your monitoring requirements. + +For further guidance on configuring your observability back-end, explore our guides for [Datadog](/api-management/logs-metrics#datadog), [Dynatrace](/api-management/logs-metrics#dynatrace), [Jaeger](/api-management/logs-metrics#jaeger) and [New Relic](/api-management/logs-metrics#new-relic). + +All the configuration options available when using Tyk's OpenTelemetry capability are documented in the [Tyk Gateway configuration guide](/tyk-oss-gateway/configuration#opentelemetry). + +### Using OpenTelemetry with Tyk + +OpenTelemetry support must be enabled at the Gateway level by adding the following to the Tyk Gateway configuration file (typically `tyk.conf`): + +```json + { + "opentelemetry": { + "enabled": true + } + } +``` + +Alternatively you can set the corresponding environment variable `TYK_GW_OPENTELEMETRY_ENABLED` to `true`. + + + +By default, OpenTelemetry spans are exported to the collector using the `gRPC` protocol to `localhost:4317`. You can choose between HTTP and gRPC protocols by configuring the [opentelemetry.exporter](/tyk-oss-gateway/configuration#opentelemetryexporter) field to `http` or `grpc`. You can specify an alternative target using the [opentelemetry.endpoint](/tyk-oss-gateway/configuration#opentelemetryendpoint) control. + + + +Tyk Gateway will now generate two spans for each request made to your APIs, encapsulating the entire request lifecycle. These spans include attributes and tags but lack fine-grained details. The parent span represents the total time from request reception to response and the child span represent the time spent in the upstream service. 
+ +Detailed Tracing Disabled + +#### Detailed Tracing + +You can generate more detailed traces for requests to an API by setting the [server.detailedTracing](/api-management/gateway-config-tyk-oas#detailedtracing) flag in the Tyk Vendor Extension of the API definition. + +For users of the Tyk Dashboard UI, the **OpenTelemetry Tracing** option in the Tyk OAS API Designer allows you to set and unset this option for the API. + +Detailed Tracing Disabled + +When detailed tracing is enabled for an API, Tyk creates a span for each middleware involved in request processing. These spans offer detailed insights, including the time taken for each middleware execution and the sequence of invocations. + +Detailed Tracing Enabled + +By choosing the appropriate setting, you can customize the level of tracing detail to suit your monitoring needs. + +**Tyk Classic APIs** + +If you are using Tyk Classic APIs, then the equivalent field in the API definition is [detailed_tracing](/api-management/gateway-config-tyk-classic#opentelemetry). + +### Understanding The Traces + +Tyk Gateway exposes a helpful set of *span attributes* and *resource attributes* with the generated spans. These attributes provide useful insights for analyzing your API requests. A clear analysis can be obtained by observing the specific actions and associated context within each request/response. This is where span and resource attributes play a significant role. + +#### Span Attributes + +A span is a named, timed operation that represents an operation. Multiple spans represent different parts of the workflow and are pieced together to create a trace. While each span includes a duration indicating how long the operation took, the span attributes provide additional contextual metadata. + +Span attributes are key-value pairs that provide contextual metadata for individual spans. Tyk automatically sets the following span attributes: + +- `tyk.api.name`: API name. +- `tyk.api.orgid`: Organization ID. 
+- `tyk.api.id`: API ID. +- `tyk.api.path`: API listen path. +- `tyk.api.tags`: If tagging is enabled in the API definition, the tags are added here. + +#### Resource Attributes + +Resource attributes provide contextual information about the entity that produced the telemetry data. Tyk exposes following resource attributes: + +#### Service Attributes + +The service attributes supported by Tyk are: + +| Attribute | Type | Description | +| :--------------------- | :-------- | :- | +| `service.name` | String | Service name for Tyk API Gateway: `tyk-gateway` | +| `service.instance.id` and `tyk.gw.id` | String | The Node ID assigned to the gateway. Example `solo-6b71c2de-5a3c-4ad3-4b54-d34d78c1f7a3` | +| `service.version` | String | Represents the service version. Example `v5.2.0` | +| `tyk.gw.dataplane` | Bool | Whether the Tyk Gateway is hybrid (`slave_options.use_rpc=true`) | +| `tyk.gw.group.id` | String | Represents the `slave_options.group_id` of the gateway. Populated only if the gateway is hybrid. | +| `tyk.gw.tags` | []String | Represents the gateway `segment_tags`. Populated only if the gateway is segmented. | + +By understanding and using these resource attributes, you can gain better insights into the performance of your API Gateways. + +#### Common HTTP Span Attributes + +Tyk follows the OpenTelemetry semantic conventions for HTTP spans. You can find detailed information on common attributes [here](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/http-spans.md#common-attributes). + +Some of these common attributes include: + +- `http.method`: HTTP request method. +- `http.scheme`: URL scheme. +- `http.status_code`: HTTP response status code. +- `http.url`: Full HTTP request URL. + +For the full list and details, refer to the official [OpenTelemetry Semantic Conventions](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/http/http-spans.md#common-attributes). 
+ +### Advanced OpenTelemetry Capabilities + +#### Context Propagation + +This setting allows you to specify the type of context propagator to use for trace data. It's essential for ensuring compatibility and data integrity between different services in your architecture. The available options are: + +- **tracecontext**: This option supports the [W3C Trace Context](https://www.w3.org/TR/trace-context/) format. +- **b3**: This option serializes `SpanContext` to/from the B3 multi Headers format. [Here](https://github.com/openzipkin/b3-propagation) you can find more information of this propagator. + +The default setting is `tracecontext`. To configure this setting, you have two options: + +- **Environment Variable**: Use `TYK_GW_OPENTELEMETRY_CONTEXTPROPAGATION` to specify the context propagator type. +- **Configuration File**: Navigate to the `opentelemetry.context_propagation` field in your configuration file to set your preferred option. + +#### Sampling Strategies + +Tyk supports configuring the following sampling strategies via the Sampling configuration structure: + +##### Sampling Type + +This setting dictates the sampling policy that OpenTelemetry uses to decide if a trace should be sampled for analysis. The decision is made at the start of a trace and applies throughout its lifetime. By default, the setting is `AlwaysOn`. + +To customize, you can either set the `TYK_GW_OPENTELEMETRY_SAMPLING_TYPE` environment variable or modify the `opentelemetry.sampling.type` field in the Tyk Gateway configuration file. Valid values for this setting are: + +- **AlwaysOn**: All traces are sampled. +- **AlwaysOff**: No traces are sampled. +- **TraceIDRatioBased**: Samples traces based on a specified ratio. + +##### Sampling Rate + +This field is crucial when the `Type` is configured to `TraceIDRatioBased`. It defines the fraction of traces that OpenTelemetry will aim to sample, and accepts a value between 0.0 and 1.0. 
For example, a `Rate` set to 0.5 implies that approximately 50% of the traces will be sampled. The default value is 0.5. To configure this setting, you have the following options:
+
+- **Environment Variable**: Use `TYK_GW_OPENTELEMETRY_SAMPLING_RATE`.
+- **Configuration File**: Update the `opentelemetry.sampling.rate` field in the configuration file.
+
+##### ParentBased Sampling
+
+This option is useful for ensuring the sampling consistency between parent and child spans. Specifically, if a parent span is sampled, all its child spans will be sampled as well. This setting is particularly effective when used with `TraceIDRatioBased`, as it helps to keep the entire transaction story together. Using `ParentBased` with `AlwaysOn` or `AlwaysOff` may not be as useful, since in these cases, either all or no spans are sampled. The default value is `false`. Configuration options include:
+
+- **Environment Variable**: Use `TYK_GW_OPENTELEMETRY_SAMPLING_PARENTBASED`.
+- **Configuration File**: Update the `opentelemetry.sampling.parent_based` field in the configuration file.
+
+### OpenTelemetry Backends for Tracing
+
+#### Datadog
+
+This guide explains how to configure Tyk API Gateway and the OpenTelemetry Collector to collect distributed traces in Datadog. It follows the reference documentation from [Datadog](https://docs.datadoghq.com/opentelemetry/otel_collector_datadog_exporter/?tab=onahost).
+
+While this tutorial demonstrates using an OpenTelemetry Collector running in Docker, the core concepts remain consistent regardless of how and where the OpenTelemetry collector is deployed.
+
+Whether you're using Tyk API Gateway in an open-source (OSS) or commercial deployment, the configuration options remain identical.
+
+##### Prerequisites
+
+- [Docker installed on your machine](https://docs.docker.com/get-docker/)
+- Tyk Gateway v5.2.0 or higher
+- OpenTelemetry Collector Contrib [docker image](https://hub.docker.com/r/otel/opentelemetry-collector-contrib). 
Make sure to use the Contrib distribution of the OpenTelemetry Collector as it is required for the [Datadog exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/datadogexporter).
+
+##### Steps for Configuration
+
+1. **Configure the OpenTelemetry Collector**
+
+   You will need:
+   - An [API key from Datadog](https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token). For example, `6c35dacbf2e16aa8cda85a58d9015c3c`.
+   - Your [Datadog site](https://docs.datadoghq.com/getting_started/site/#access-the-datadog-site). Examples are: `datadoghq.com`, `us3.datadoghq.com` and `datadoghq.eu`.
+
+   Create a new YAML configuration file named `otel-collector.yml` with the following content:
+
+   ```yaml
+   receivers:
+     otlp:
+       protocols:
+         grpc:
+           endpoint: 0.0.0.0:4317
+   processors:
+     batch:
+       send_batch_max_size: 100
+       send_batch_size: 10
+       timeout: 10s
+   exporters:
+     datadog:
+       api:
+         site: "YOUR-DATADOG-SITE"
+         key: "YOUR-DATADOG-API-KEY"
+   service:
+     pipelines:
+       traces:
+         receivers: [otlp]
+         processors: [batch]
+         exporters: [datadog]
+   ```
+
+2. **Configure a test API**
+
+   If you don't have any APIs configured yet, create a subdirectory called `apps` in the current directory. Create a new file `apidef-hello-world.json` and copy this very simple API definition for testing purposes:
+
+   ```json
+   {
+     "name": "Hello-World",
+     "slug": "hello-world",
+     "api_id": "Hello-World",
+     "org_id": "1",
+     "use_keyless": true,
+     "detailed_tracing": true,
+     "version_data": {
+       "not_versioned": true,
+       "versions": {
+         "Default": {
+           "name": "Default",
+           "use_extended_paths": true
+         }
+       }
+     },
+     "proxy": {
+       "listen_path": "/hello-world/",
+       "target_url": "http://httpbin.org/",
+       "strip_listen_path": true
+     },
+     "active": true
+   }
+   ```
+
+3. **Create the Docker-Compose file**
+
+   Save the following YAML configuration to a file named `docker-compose.yml`. 
+ + ```yaml + version: "2" + services: + # OpenTelemetry Collector Contrib + otel-collector: + image: otel/opentelemetry-collector-contrib:latest + volumes: + - ./otel-collector.yml:/etc/otel-collector.yml + command: ["--config=/etc/otel-collector.yml"] + ports: + - "4317" # OTLP gRPC receiver + networks: + - tyk + + # Tyk API Gateway, open-source deployment + tyk: + image: tykio/tyk-gateway:v5.2 + ports: + - 8080:8080 + environment: + - TYK_GW_OPENTELEMETRY_ENABLED=true + - TYK_GW_OPENTELEMETRY_EXPORTER=grpc + - TYK_GW_OPENTELEMETRY_ENDPOINT=otel-collector:4317 + volumes: + - ./apps:/opt/tyk-gateway/apps + depends_on: + - redis + networks: + - tyk + + redis: + image: redis:4.0-alpine + ports: + - 6379:6379 + command: redis-server --appendonly yes + networks: + - tyk + + networks: + tyk: + ``` + + + To start the services, go to the directory that contains the docker-compose.yml file and run the following command: + + ```bash + docker-compose up + ``` + +4. **Explore OpenTelemetry traces in Datadog** + + Begin by sending a few requests to the API endpoint configured in step 2: + `` + http://localhost:8080/hello-world/ + `` + + Next, log in to Datadog and navigate to the 'APM' / 'Traces' section. Here, you should start observing traces generated by Tyk: + + Tyk API Gateway distributed trace in Datadog + + Click on a trace to view all its internal spans: + + Tyk API Gateway spans in Datadog + + Datadog will generate a service entry to monitor Tyk API Gateway and will automatically compute valuable metrics using the ingested traces. + + Tyk API Gateway service monitoring in Datadog + +##### Troubleshooting + +If you do not observe any traces appearing in Datadog, consider the following steps for resolution: + +- Logging: Examine logs from Tyk API Gateway and from the OpenTelemetry Collector for any issues or warnings that might provide insights. +- Data Ingestion Delays: Be patient, as there could be some delay in data ingestion. 
Wait for 10 seconds to see if traces eventually appear, as this is the timeout we have configured in the batch processing of the OpenTelemetry collector within step 1.
+
+#### Dynatrace
+
+This documentation covers how to set up Dynatrace to ingest OpenTelemetry traces via the OpenTelemetry Collector (OTel Collector) using Docker.
+
+##### Prerequisites
+
+- [Docker installed on your machine](https://docs.docker.com/get-docker/)
+- [Dynatrace account](https://www.dynatrace.com/)
+- Dynatrace Token
+- Gateway v5.2.0 or higher
+- OTel Collector [docker image](https://hub.docker.com/r/otel/opentelemetry-collector)
+
+##### Steps for Configuration
+
+1. **Generate Dynatrace Token**
+
+   1. In the Dynatrace console, navigate to access keys.
+   2. Click on _Create a new key_
+   3. You will be prompted to select a scope. Choose _Ingest OpenTelemetry_ traces.
+   4. Save the generated token securely; it cannot be retrieved once lost.
+
+   Example of a generated token ([taken from Dynatrace website](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication#token-format-example)):
+
+   ```bash
+   dt0s01.ST2EY72KQINMH574WMNVI7YN.G3DFPBEJYMODIDAEX454M7YWBUVEFOWKPRVMWFASS64NFH52PX6BNDVFFM572RZM
+   ```
+
+2. **Configuration Files**
+
+   1. **OTel Collector Configuration File**
+
+      Create a YAML file named `otel-collector-config.yml`. In this file replace `YOUR-ENVIRONMENT-ID` with the string from the address bar when you log into Dynatrace. Replace `YOUR-API-TOKEN` with the token you generated earlier. 
+ + Here's a sample configuration file: + + ```yaml + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + grpc: + endpoint: 0.0.0.0:4317 + processors: + batch: + exporters: + otlphttp: + endpoint: "https://.live.dynatrace.com/api/v2/otlp" + headers: + Authorization: "Api-Token " # You must keep 'Api-Token', just modify + extensions: + health_check: + pprof: + endpoint: :1888 + zpages: + endpoint: :55679 + service: + extensions: [pprof, zpages, health_check] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp] + ``` + + 2. **Docker Compose File** + + Create a file named docker-compose.yml. + + Here is the sample Docker Compose file: + + ```yaml + version: "3.9" + services: + otel-collector: + image: otel/opentelemetry-collector:latest + volumes: + - ./configs/otel-collector-config.yml:/etc/otel-collector.yml + command: ["--config=/etc/otel-collector.yml"] + networks: + - tyk + ports: + - "1888:1888" # pprof extension + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP http receiver + - "55670:55679" # zpages extension + networks: + tyk: + ``` + +3. **Testing and Viewing Traces** + + **1.** Launch the Docker containers: docker-compose up -d + + **2.** Initialize your Tyk environment. + + **3.** Configure a basic HTTP API on the Tyk Gateway or Dashboard. + + **4.** Use cURL or Postman to send requests to the API gateway. + + **5.** Navigate to Dynatrace -> Services -> Tyk-Gateway. + + Dynatrace Services + + **6.** Wait for 5 minutes and refresh. + + **7.** Traces, along with graphs, should appear. If they don't, click on the "Full Search" button. + + Dynatrace Metrics + +4. **Troubleshooting** + + - If traces are not appearing, try clicking on the "Full Search" button after waiting for 5 minutes. + Make sure your Dynatrace token is correct in the configuration files. 
+ - Validate the Docker Compose setup by checking the logs for any errors: `docker-compose logs` + +And there you have it! You've successfully integrated Dynatrace with the OpenTelemetry Collector using Docker. + +#### Elasticsearch + +This quick start explains how to configure Tyk API Gateway (OSS, self-managed or hybrid gateway connected to Tyk Cloud) with the OpenTelemetry Collector to export distributed traces to [Elasticsearch](https://www.elastic.co/observability). + +##### Prerequisites + +Ensure the following prerequisites are met before proceeding: + +* Tyk Gateway v5.2 or higher +* OpenTelemetry Collector deployed locally +* Elasticsearch deployed locally or an account on Elastic Cloud with Elastic APM + +Elastic Observability natively supports OpenTelemetry and its OpenTelemetry protocol (OTLP) to ingest traces, metrics, and logs. + +OpenTelemetry support in Elasticsearch +Credit: Elasticsearch, [OpenTelemetry on Elastic](https://www.elastic.co/blog/opentelemetry-observability) + +##### Steps for Configuration + +1. **Configure Tyk API Gateway** + + To enable OpenTelemetry in Tyk API Gateway, follow these steps: + + For Tyk Helm Charts: + * Add the following configuration to the Tyk Gateway section: + + ```yaml + tyk-gateway: + gateway: + opentelemetry: + enabled: true + endpoint: {{Add your endpoint here}} + exporter: grpc + ``` + + For Docker Compose: + * In your docker-compose.yml file for Tyk Gateway, add the following environment variables: + + ```yaml + environment: + - TYK_GW_OPENTELEMETRY_ENABLED=true + - TYK_GW_OPENTELEMETRY_EXPORTER=grpc + - TYK_GW_OPENTELEMETRY_ENDPOINT={{Add your endpoint here}} + ``` + + Make sure to replace `` with the appropriate endpoint from your OpenTelemetry collector. + + After enabling OpenTelemetry at the Gateway level, you can activate [detailed tracing](/api-management/logs-metrics#opentelemetry) for specific APIs by editing their respective API definitions. 
Set the `detailed_tracing` option to either true or false. By default, this setting is false.
+
+2. **Configure the OpenTelemetry Collector to Export to Elasticsearch**
+
+ To configure the OTel Collector with Elasticsearch Cloud, follow these steps:
+
+ * Sign up for an [Elastic account](https://www.elastic.co/) if you haven't already
+ * Once logged in to your Elastic account, select "Observability" and click on the option "Monitor my application performance"
+
+ Configure Elasticsearch
+
+ * Scroll down to the APM Agents section and click on the OpenTelemetry tab
+
+ Configure Elasticsearch
+
+ * Search for the section "Configure OpenTelemetry in your application". You will need to copy the value of "OTEL_EXPORTER_OTLP_ENDPOINT" and "OTEL_EXPORTER_OTLP_HEADERS" in your OpenTelemetry Collector configuration file.
+
+ Configure Elasticsearch
+
+ * Update your OpenTelemetry Collector configuration, here's a simple example:
+
+ ```yaml
+ receivers:
+   otlp:
+     protocols:
+       grpc:
+         endpoint: 0.0.0.0:4317 # OpenTelemetry receiver endpoint
+ processors:
+   batch:
+ exporters:
+   otlp/elastic:
+     endpoint: "ELASTIC_APM_SERVER_ENDPOINT_GOES_HERE" #exclude scheme, e.g. HTTPS:// or HTTP://
+     headers:
+       # Elastic APM Server secret token
+       Authorization: "Bearer ELASTIC_APM_SECRET_TOKEN_GOES_HERE"
+ service:
+   pipelines:
+     traces:
+       receivers: [otlp]
+       exporters: [otlp/elastic]
+ ```
+
+ If you are running Elasticsearch locally, you will need to use your APM Server endpoint (elastic-apm-server:8200) and set up [a secret token authorization in ElasticSearch](https://www.elastic.co/guide/en/observability/current/secret-token.html).
+
+ You can refer to the [example configuration provided by Elastic](https://www.elastic.co/guide/en/observability/current/open-telemetry-direct.html#connect-open-telemetry-collector) for more guidance on the OpenTelemetry Collector configuration.
+
+3. 
**Explore OpenTelemetry Traces in Elasticsearch** + + * In Elasticsearch Cloud: + * Go to "Home" and select "Observability." + Configure Elasticsearch + * On the right menu, click on "APM / Services." + * Click on "tyk-gateway." + + You will see a dashboard automatically generated based on the distributed traces sent by Tyk API Gateway to Elasticsearch. + + Configure Elasticsearch + + Select a transaction to view more details, including the distributed traces: + + Configure Elasticsearch + +#### New Relic + +This guide provides a step-by-step procedure to integrate New Relic with Tyk Gateway via the OpenTelemetry Collector. At the end of this guide, you will be able to visualize traces and metrics from your Tyk Gateway on the New Relic console. + +##### Prerequisites + +- [Docker installed on your machine](https://docs.docker.com/get-docker/) +- [New Relic Account](https://newrelic.com/) +- New Relic API Key +- Gateway v5.2.0 or higher +- OTel Collector [docker image](https://hub.docker.com/r/otel/opentelemetry-collector) + +##### Steps for Configuration + +1. **Obtain New Relic API Key** + + 1. Navigate to your New Relic Console. + + 2. Go to `Profile β†’ API keys`. + + 3. Copy the key labeled as `INGEST-LICENSE`. + +
+ + + + + You can follow the [official New Relic documentation](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/) for more information. + + + + **Example token:** + + ```bash + 93qwr27e49e168d3844c5h3d1e878a463f24NZJL + ``` + +2. **Configuration Files** + + **OTel Collector Configuration YAML** + + 1. Create a file named `otel-collector-config.yml` under the configs directory. + 2. Copy the following template into that file: + + ```yaml + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + grpc: + endpoint: 0.0.0.0:4317 + processors: + batch: + exporters: + otlphttp: + endpoint: "" + headers: + api-Key: "" + extensions: + health_check: + pprof: + endpoint: :1888 + zpages: + endpoint: :55679 + service: + extensions: [pprof, zpages, health_check] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp] + ``` + + - Replace `` with your specific New Relic endpoint (`https://otlp.nr-data.net` for US or `https://otlp.eu01.nr-data.net` for EU). + - Replace `` with the API key obtained in Step 1. + + **Docker Compose configuration** + + 1. Create a file named docker-compose.yml at the root level of your project directory. + + 2. Paste the following code into that file: + + ```yaml + version: "3.9" + services: + otel-collector: + image: otel/opentelemetry-collector:latest + volumes: + - ./otel-collector-config.yml:/etc/otel-collector.yml + command: ["--config=/etc/otel-collector.yml"] + networks: + - tyk + ports: + - "1888:1888" # pprof extension + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP http receiver + - "55670:55679" # zpages extension + + networks: + tyk: + ``` +
+ + + + + Replace the variable fields with the relevant data. + + + +3. **Testing and Verifying Traces** + + 1. Run `docker-compose up -d` to start all services. + + 2. Initialize your Tyk environment. + + 3. Create a simple `httpbin` API using Tyk Dashboard. You can follow the [Tyk Dashboard documentation](/api-management/gateway-config-managing-classic#create-an-api) for more information. + + 4. Send requests to the API using cURL or Postman. + + 5. Open New Relic Console. + + 6. Navigate to `APM & Services β†’ Services - OpenTelemetry β†’ tyk-gateway`. + + New Relic Services + + 7. Wait for about 5 minutes for the data to populate. + + Traces and graphs should now be visible on your New Relic console. + + New Relic Metrics + +
+ + + + + If traces are not showing, try refreshing the New Relic dashboard. + + + +##### Troubleshooting + +- If the traces aren't appearing, double-check your API key and endpoints. +- Ensure that your Tyk Gateway and New Relic are both running and connected. + +##### Conclusion + +You have successfully integrated New Relic with Tyk Gateway via the OpenTelemetry Collector. You can now monitor and trace your APIs directly from the New Relic console. + +#### Jaeger + +##### Using Docker + +This quick start guide offers a detailed, step-by-step walkthrough for configuring Tyk API Gateway (OSS, self-managed or hybrid gateway connected to Tyk Cloud) with OpenTelemetry and [Jaeger](https://www.jaegertracing.io/) to significantly improve API observability. We will cover the installation of essential components, their configuration, and the process of ensuring seamless integration. + +For Kubernetes instructions, please refer to [How to integrate with Jaeger on Kubernetes](#using-kubernetes). + +###### Prerequisites + +Ensure the following prerequisites are met before proceeding: + +- [Docker installed on your machine](https://docs.docker.com/get-docker/) +- Gateway v5.2.0 or higher + +###### Steps for Configuration + +1. **Create the Docker-Compose File for Jaeger** + + Save the following YAML configuration in a file named docker-compose.yml: + + ```yaml + version: "2" + services: + # Jaeger: Distributed Tracing System + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" # Jaeger UI + - "4317:4317" # OTLP receiver + ``` + + This configuration sets up Jaeger's all-in-one instance with ports exposed for Jaeger UI and the OTLP receiver. + +2. **Deploy a Test API Definition** + + If you haven't configured any APIs yet, follow these steps: + + - Create a subdirectory named apps in the current directory. + - Create a new file named `apidef-hello-world.json`. 
+ - Copy the provided simple API definition below into the `apidef-hello-world.json` file: + + + ```json + { + "name": "Hello-World", + "slug": "hello-world", + "api_id": "Hello-World", + "org_id": "1", + "use_keyless": true, + "detailed_tracing": true, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "use_extended_paths": true + } + } + }, + "proxy": { + "listen_path": "/hello-world/", + "target_url": "http://httpbin.org/", + "strip_listen_path": true + }, + "active": true + } + ``` + + This API definition sets up a basic API named Hello-World for testing purposes, configured to proxy requests to `http://httpbin.org/`. + +3. **Run Tyk Gateway OSS with OpenTelemetry Enabled** + + To run Tyk Gateway with OpenTelemetry integration, extend the previous Docker Compose file to include Tyk Gateway and Redis services. Follow these steps: + + - Add the following configuration to your existing docker-compose.yml file: + + ```yaml + # ... Existing docker-compose.yml content for jaeger + + tyk: + image: tykio/tyk-gateway:v5.2.0 + ports: + - 8080:8080 + environment: + - TYK_GW_OPENTELEMETRY_ENABLED=true + - TYK_GW_OPENTELEMETRY_EXPORTER=grpc + - TYK_GW_OPENTELEMETRY_ENDPOINT=jaeger-all-in-one:4317 + volumes: + - ${TYK_APPS:-./apps}:/opt/tyk-gateway/apps + depends_on: + - redis + + redis: + image: redis:4.0-alpine + ports: + - 6379:6379 + command: redis-server --appendonly yes + ``` + + - Navigate to the directory containing the docker-compose.yml file in your terminal. + - Execute the following command to start the services: + + ```bash + docker compose up + ``` + +4. **Explore OpenTelemetry Traces in Jaeger** + + - Start by sending a few requests to the API endpoint configured in Step 2: + ```bash + curl http://localhost:8080/hello-world/ -i + ``` + + - Access Jaeger at [http://localhost:16686](http://localhost:16686). + - In Jaeger's interface: + - Select the service named tyk-gateway. + - Click the *Find Traces* button. 
+ + You should observe traces generated by Tyk Gateway, showcasing the distributed tracing information. + + Tyk API Gateway distributed trace in Jaeger + + Select a trace to visualize its corresponding internal spans: + + Tyk API Gateway spans in Jaeger + + +##### Using Kubernetes + +This quick start guide offers a detailed, step-by-step walkthrough for configuring Tyk Gateway OSS with OpenTelemetry and [Jaeger](https://www.jaegertracing.io/) on Kubernetes to significantly improve API observability. We will cover the installation of essential components, their configuration, and the process of ensuring seamless integration. + +For Docker instructions, please refer to [How to integrate with Jaeger on Docker](#using-docker). + + +###### Prerequisites + +Ensure the following prerequisites are in place before proceeding: + +- A functional Kubernetes cluster +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) and [helm](https://helm.sh/docs/intro/install/) CLI tools installed + +###### Steps for Configuration + +1. **Install Jaeger Operator** + + For the purpose of this tutorial, we will use jaeger-all-in-one, which includes the Jaeger agent, collector, query, and UI in a single pod with in-memory storage. This deployment is intended for development, testing, and demo purposes. Other deployment patterns can be found in the [Jaeger Operator documentation](https://www.jaegertracing.io/docs/1.51/operator/#deployment-strategies). + + + 1. Install the cert-manager release manifest (required by Jaeger) + + ```bash + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.yaml + ``` + + 2. Install [Jaeger Operator](https://www.jaegertracing.io/docs/1.51/operator/). + + ```bash + kubectl create namespace observability + kubectl create -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.51.0/jaeger-operator.yaml -n observability + + ``` + + 3. 
After the Jaeger Operator is deployed to the `observability` namespace, create a Jaeger instance: + + ```bash + kubectl apply -n observability -f - < + + + +Please make sure you are installing Redis versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/tyk-self-managed/install#redis). + + + + + Tyk Gateway is now accessible through service gateway-svc-tyk-oss-tyk-gateway at port 8080 and exports the OpenTelemetry traces to the `jaeger-all-in-one-collector` service. + +3. **Deploy Tyk Operator** + + Deploy Tyk Operator to manage APIs in your cluster: + + ```bash + kubectl create namespace tyk-operator-system + kubectl create secret -n tyk-operator-system generic tyk-operator-conf \ + --from-literal "TYK_AUTH=$APISecret" \ + --from-literal "TYK_ORG=org" \ + --from-literal "TYK_MODE=ce" \ + --from-literal "TYK_URL=http://gateway-svc-tyk-otel-tyk-gateway.tyk.svc:8080" \ + --from-literal "TYK_TLS_INSECURE_SKIP_VERIFY=true" + helm install tyk-operator tyk-helm/tyk-operator -n tyk-operator-system + + ``` + +4. **Deploy a Test API Definition** + + Save the following API definition as `apidef-hello-world.yaml`: + + ```yaml + apiVersion: tyk.tyk.io/v1alpha1 + kind: ApiDefinition + metadata: + name: hello-world + spec: + name: hello-world + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org/ + listen_path: /hello-world + strip_listen_path: true + ``` + + To apply this API definition, run the following command: + + ```bash + kubectl apply -f apidef-hello-world.yaml + ``` + + This step deploys an API definition named hello-world using the provided configuration. It enables a keyless HTTP API proxying requests to http://httpbin.org/ and accessible via the path /hello-world. + +5. 
**Explore OpenTelemetry traces in Jaeger**
+
+ You can use the `kubectl port-forward` command to access Tyk and Jaeger services running in the cluster from your local machine's localhost:
+
+ For Tyk API Gateway:
+
+ ```bash
+ kubectl port-forward service/gateway-svc-tyk-otel-tyk-gateway 8080:8080 -n tyk
+ ```
+
+ For Jaeger:
+
+ ```bash
+ kubectl port-forward service/jaeger-all-in-one-query 16686 -n observability
+ ```
+
+ Begin by sending a few requests to the API endpoint configured in step 2:
+
+ ```bash
+ curl http://localhost:8080/hello-world/ -i
+ ```
+
+ Next, navigate to Jaeger on `http://localhost:16686`, select the service called `tyk-gateway` and click on the button *Find traces*. You should see traces generated by Tyk:
+
+ Tyk API Gateway distributed trace in Jaeger
+
+ Click on a trace to view all its internal spans:
+
+ Tyk API Gateway spans in Jaeger
+
+## OpenTracing (deprecated)
+
+
+
+**Deprecation**
+
+The CNCF (Cloud Native Computing Foundation) has archived the OpenTracing project. This means that no new pull requests or feature requests are accepted into OpenTracing repositories.
+
+We introduced support for [OpenTelemetry](/api-management/logs-metrics#opentelemetry) in Tyk v5.2. We recommend that users migrate to OpenTelemetry for better support of your tracing needs.
+
+OpenTracing is now deprecated in Tyk products.
+
+
+
+### OpenTracing tools with legacy Tyk integration
+
+- [Jaeger](/api-management/logs-metrics#jaeger-1)
+- [Zipkin](/api-management/logs-metrics#zipkin)
+- [New Relic](/api-management/logs-metrics#new-relic-1)
+
+### Enabling OpenTracing
+
+OpenTracing can be configured at the Gateway level by adding the following configuration to your Gateway configuration (typically via the `tyk.conf` file or equivalent [environment variables](/tyk-oss-gateway/configuration)). 
+ +```.json +{ + "tracing": { + "enabled": true, + "name": "${tracer_name}", + "options": {} + } +} +``` + +Where: +- `name` is the name of the supported tracer +- `enabled`: set this to true to enable tracing +- `options`: key/value pairs for configuring the enabled tracer. See the + supported tracer documentation for more details. + +Tyk will automatically propagate tracing headers to APIs when tracing is enabled. + +### Jaeger + + + +Tyk's OpenTelemetry Tracing works with Jaeger and we recommend following our guide to [use OpenTelemetry with Jaeger](/api-management/logs-metrics#jaeger) rather than the following deprecated Open Tracing method. + + + +Prior to Tyk 5.2, you cannot use OpenTelemetry and so must use [OpenTracing](https://opentracing.io/) with the [Jaeger client libraries](https://www.jaegertracing.io/docs/1.11/client-libraries/) to send Tyk Gateway traces to Jaeger. + +**Configuring Jaeger** + +In `tyk.conf` on `tracing` setting + +```{.json} +{ + "tracing": { + "enabled": true, + "name": "jaeger", + "options": {} + } +} +``` + +`options` are settings that are used to initialise the Jaeger client. 
For more details about the options [see client libraries](https://www.jaegertracing.io/docs/1.11/client-libraries/) + +**Sample configuration** + +```{.json} +{ + "tracing": { + "enabled": true, + "name": "jaeger", + "options": { + "baggage_restrictions": null, + "disabled": false, + "headers": null, + "reporter": { + "BufferFlushInterval": "0s", + "collectorEndpoint": "", + "localAgentHostPort": "jaeger:6831", + "logSpans": true, + "password": "", + "queueSize": 0, + "user": "" + }, + "rpc_metrics": false, + "sampler": { + "maxOperations": 0, + "param": 1, + "samplingRefreshInterval": "0s", + "samplingServerURL": "", + "type": "const" + }, + "serviceName": "tyk-gateway", + "tags": null, + "throttler": null + } + } +} +``` + +### New Relic + + + +Tyk's OpenTelemetry Tracing works with New Relic and we recommend following our guide to [use OpenTelemetry with New Relic](/api-management/logs-metrics#new-relic) rather than the following deprecated Open Tracing method. + + + +Prior to Tyk 5.2, you cannot use OpenTelemetry and so must use [OpenTracing](https://opentracing.io/) to send Tyk Gateway traces to [*New Relic*](https://newrelic.com/) using the *Zipkin* format.
+
+**Configuring New Relic**
+
+In `tyk.conf` under the `tracing` section
+
+```.json
+{
+  "tracing": {
+    "enabled": true,
+    "name": "zipkin",
+    "options": {}
+  }
+}
+```
+
+In the `options` setting you can set the initialisation of the *Zipkin* client.
+
+**Sample configuration**
+
+```.json
+{
+  "tracing": {
+    "enabled": true,
+    "name": "zipkin",
+    "options": {
+      "reporter": {
+        "url": "https://trace-api.newrelic.com/trace/v1?Api-Key=NEW_RELIC_LICENSE_KEY&Data-Format=zipkin&Data-Format-Version=2"
+      }
+    }
+  }
+}
+```
+
+`reporter.url` is the URL to the *New Relic* server, where trace data will be sent.
+
+### Zipkin
+
+Prior to Tyk 5.2, you cannot use OpenTelemetry and so must use [OpenTracing](https://opentracing.io/) with the [Zipkin Go tracer](https://zipkin.io/pages/tracers_instrumentation) to send Tyk Gateway traces to Zipkin.
+
+**Configuring Zipkin**
+
+In `tyk.conf` on `tracing` setting
+
+```{.json}
+{
+  "tracing": {
+    "enabled": true,
+    "name": "zipkin",
+    "options": {}
+  }
+}
+```
+
+`options` are settings that are used to initialise the Zipkin client.
+
+**Sample configuration**
+
+```{.json}
+{
+  "tracing": {
+    "enabled": true,
+    "name": "zipkin",
+    "options": {
+      "reporter": {
+        "url": "http://localhost:9411/api/v2/spans"
+      }
+    }
+  }
+}
+```
+
+`reporter.url` is the URL to the Zipkin server, where trace data will be sent. 
diff --git a/api-management/manage-apis/api-operations/api-observability.mdx b/api-management/manage-apis/api-operations/api-observability.mdx new file mode 100644 index 000000000..270cc6e21 --- /dev/null +++ b/api-management/manage-apis/api-operations/api-observability.mdx @@ -0,0 +1,63 @@ +--- +title: "API Observability" +description: "Explains how to achieve API observability through Open Telemetry signals such as traces, metrics and logs" +keywords: "API Observability, Distributed Tracing, Metrics, Logs, Logging, Open Telemetry, OTel" +sidebarTitle: "Monitoring and Troubleshooting APIs" +--- + +API observability is the process of monitoring and analyzing APIs to gain insights into developer and end-user experience and to ensure the reliability of your system. + +You can achieve API observability by using a combination of telemetry signals such as traces, metrics, and logs. Each of these signals serves a specific purpose in monitoring and troubleshooting API issues: + +## Distributed tracing + +Distributed traces provide a detailed, end-to-end view of a single API request or transaction as it traverses through various services and components. Traces are crucial for understanding the flow of requests and identifying bottlenecks or latency issues. Here's how you can make use of traces for API observability: + +- **End-to-end request tracing:** Implement distributed tracing across your microservices architecture to track requests across different services and gather data about each service's contribution to the overall request latency. + +- **Transaction Flow:** Visualize the transaction flow by connecting traces to show how requests move through different services, including entry points (e.g., API gateway), middleware and backend services. + +- **Latency Analysis:** Analyze trace data to pinpoint which service or component is causing latency issues, allowing for quick identification and remediation of performance bottlenecks. 
+ +- **Error Correlation:** Use traces to correlate errors across different services to understand the root cause of issues and track how errors propagate through the system. + + +From v5.2+, Tyk supports OpenTelemetry standard for tracing. You can configure Tyk to work with an [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) or integrate it with any [observability vendor supporting OpenTelemetry](https://opentelemetry.io/ecosystem/vendors/) to capture traces of API requests as they flow through Tyk API Gateway and any upstream services. + +Explore our guides for [Datadog](/api-management/logs-metrics#datadog), [Dynatrace](/api-management/logs-metrics#dynatrace), [Jaeger](/api-management/logs-metrics#using-docker) and [New Relic](/api-management/logs-metrics#new-relic) for further info on how to integrate with 3rd party observability vendors. + +Tyk also supports OpenTracing (now deprecated), but we recommend users to start migrating to OpenTelemetry for a comprehensive, vendor-neutral technology with wide industry support. + +## Metrics + +Metrics provide aggregated, quantitative data about the performance and behavior of an API over time. They offer insights into the overall health of the system. Here's how you can leverage metrics for API observability: + +- **Key Performance Indicators (KPIs):** Define and track essential metrics such as request rate, response time, error rate and resource utilization to monitor the overall health and performance of the API. + +- **Custom Metrics:** Create custom metrics that are specific to your API's functionality or business objectives. For example, track the number of successful payments processed or the number of users signed up. + +- **Threshold Alerts:** Set up alerts based on predefined thresholds for metrics to receive notifications when API performance deviates from the expected norm. 
+ +- **Trend Analysis:** Analyze metric trends over time to identify long-term performance patterns, plan for scaling and detect anomalies. + + +Tyk offers built-in metrics and analytics in [Tyk Dashboard](/api-management/dashboard-configuration#traffic-analytics) through Tyk API Gateway and Tyk Pump. These metrics provide insights into API usage, traffic patterns and response times. The built-in metrics allow you to track overall API traffic, detailed API analytics including: request count, response time distribution and error rates. Furthermore, API usage can be tracked on a per-key basis. + +You can also use Tyk Pump to export those metrics to [different back-ends](/api-management/tyk-pump#external-data-stores). Here is an example of using Tyk Pump to send [API analytics metrics to Prometheus and Grafana](https://tyk.io/blog/service-level-objectives-for-your-apis-with-tyk-prometheus-and-grafana/). From v5.2+, you can also leverage the OpenTelemetry spans exported from Tyk Gateway to calculate and export [span metrics](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/spanmetricsconnector/README.md) from the OpenTelemetry collector. + +## Logs + +Logs provide detailed records of events and activities within the API and its associated services. Logs are invaluable for debugging issues and understanding what happened at a specific point in time. Here's how you can utilize logs for API observability: + +- **Error Identification:** Use logs to identify errors, exceptions, and warning messages that indicate issues with the API's behavior. + +- **Debugging:** Logs help developers troubleshoot and debug issues by providing detailed information about the sequence of events leading up to a problem. + +- **Security Monitoring:** Monitor logs for security-related events, such as authentication failures, access control violations and suspicious activities. 
+ +- **Audit Trail:** Maintain an audit trail of important actions and changes to the API, including configuration changes, access control changes and data updates. + + +Tyk allows you to capture and analyze logs related to API requests and responses in the [Log Browser](/api-management/dashboard-configuration#activity-logs) . You can optionally enable detailed recording for the requests per API level or per Key level to store inbound request and outbound response data. You can [enable debug modes](/api-management/troubleshooting-debugging#capturing-detailed-logs) for selected APIs and send the detail logs to one or more Pump backend instances. + +To achieve comprehensive API observability, it is essential to integrate traces, metrics and logs into the observability tools that the team in charge of the APIs are already using. Those tools should allow users to query and visualize data, set up alerts and provide an intuitive interface for monitoring and troubleshooting API issues effectively. See also our 7 observability anti-pattern to avoid when working with APIs: [Bad API observability](https://tyk.io/blog/bad-api-observability/). diff --git a/api-management/manage-apis/deploy-apis/deploy-apis-overview.mdx b/api-management/manage-apis/deploy-apis/deploy-apis-overview.mdx new file mode 100644 index 000000000..2c3c07b25 --- /dev/null +++ b/api-management/manage-apis/deploy-apis/deploy-apis-overview.mdx @@ -0,0 +1,56 @@ +--- +title: "API Creation Methods" +description: "Different ways to create and manage APIs in Tyk" +keywords: "API Management, API Configuration, Dashboard, Tyk Sync, Tyk Operator" +sidebarTitle: "API Creation Methods" +--- + +This page explains the different methods available for creating and managing APIs in Tyk, each suited to different use cases and workflow requirements. + +## File-based configuration + + +Load API configurations directly to the `/apps` folder using JSON API specifications. 
This method is available for open source users and is ideal for testing gateway and API configurations. + +**Use case:** Testing and experimentation in development environments. + +**Learn more:** +* [Create an API in file-based mode](/api-management/gateway-config-managing-classic#create-an-api-in-file-based-mode) + +## Dashboard UI + +Create and configure APIs through the web-based Dashboard interface. Changes take effect immediately, making this method suitable for learning, testing, and proof-of-concept work. + +**Use case:** Manual API management, learning, and proof-of-concept projects. + +**Learn more:** +* [Create an API with the Dashboard](/api-management/gateway-config-managing-classic#create-an-api-with-the-dashboard) + +## Dashboard and Gateway API + +Programmatically create and manage APIs, policies, keys, and developer portals using REST APIs. This method provides flexibility for automation but requires imperative scripting. + +**Use case:** Programmatic API management and basic automation needs. + +**Learn more:** +- [Dashboard API](/api-management/dashboard-configuration#exploring-the-dashboard-api) +- [Gateway API](/tyk-gateway-api) + +## Tyk Sync + +Manage API configurations declaratively using version-controlled files. Tyk Sync enables GitOps workflows by maintaining API configurations as code that can be versioned and deployed through CI/CD pipelines. + +**Use case:** GitOps workflows and teams requiring version-controlled API configurations. + +**Learn more:** +- [Tyk Sync](/api-management/automations/sync) + +## Tyk Operator + +Kubernetes-native API management using Custom Resource Definitions (CRDs). Tyk Operator provides declarative configuration with automatic drift detection and reconciliation in Kubernetes environments. + +**Use case:** Kubernetes-native environments requiring automated API lifecycle management. 
+ +**Learn more:** +- [Tyk Operator](/api-management/automations/operator#what-is-tyk-operator) +- [Using Tyk Operator to enable GitOps](/api-management/automations) diff --git a/api-management/mdcb.mdx b/api-management/mdcb.mdx new file mode 100644 index 000000000..dec05e082 --- /dev/null +++ b/api-management/mdcb.mdx @@ -0,0 +1,984 @@ +--- +title: "Tyk Multi Data Center Bridge (MDCB): Centralized API Governance Across Distributed Environments" +description: "How to configure Multi Data Center Bridge" +keywords: "MDCB, Multi Data Center Bridge, Control Plane, Data Plane, Synchroniser" +sidebarTitle: "Manage Distributed Gateways" +--- + +## Overview + +Tyk’s Multi Data Center Bridge (MDCB) is a separately licensed extension to the Tyk control plane that performs management and synchronisation of logically or geographically distributed clusters of Tyk API Gateways. We use it ourselves to support our Tyk Cloud offering. + +### Challenges in Distributed Environment + +When your users are spread geographically and want to access your APIs from different parts of the world you can optimize the performance, value and utility of your APIs by deploying API Gateways in data centers local to them. + +Single API gateway + +Having localised gateways offers benefits to you and your users, such as: + +- Reduced latency (roundtrip time) for users by accessing a local data center +- Deployment close to backend services, reducing interconnect costs and latencies +- Increased availability across your estate - if one region goes offline the rest will continue to serve users +- Compliance with data residency and sovereignty regulations + +Distributed API gateways + +This distributed architecture, however, introduces challenges for you in terms of managing the configuration, synchronisation and resilience of the Gateways in each data center. + +- How do you configure each of the Tyk API Gateways to ensure that a user can access only their authorized APIs, but from any location? 
+- How can you ensure that the correct APIs are deployed to the right Gateways - and kept current as they are updated? + +As the complexity of your architecture increases, this maintenance becomes an increasingly difficult and expensive manual task. + +This is where Tyk’s Multi Data Center Bridge (MDCB) comes in. + +### How does Tyk Multi Data Center Bridge help? + +The Tyk MDCB makes it possible to manage federated global deployments easily, from a central Dashboard: you can confidently deploy a multi-data center, geographically isolated set of Tyk Gateway clusters for maximum redundancy, failover, latency optimization, and uptime. + +Combining Tyk Dashboard with MDCB, you are provided with a β€œsingle pane of glass” or control plane that allows you to centrally manage multiple Tyk Gateway clusters. This has many advantages over having separate gateways and corresponding dashboard/portals, which would require manual synchronisation to roll out any changes (e.g. new APIs) across all the individual gateways. + +By deploying MDCB, API Management with Tyk becomes a service that can be easily offered to multiple teams from a centralised location. + +Distributed API Gateways with MDCB + +### How does MDCB work? + +MDCB acts as a broker between the Tyk Gateway instances that you deploy in data centers around the world. A single Control Plane (Management) Gateway is used as reference: you configure APIs, keys and quotas in one central location; MDCB looks after the propagation of these to the Data Plane (or Worker) Gateways, ensuring the synchronisation of changes. + +MDCB is extremely flexible, supporting clusters of Tyk Gateways within or across data centers - so for example two clusters within the same data center could run different configurations of APIs, users etc. + +MDCB keeps your Tyk API Gateways highly available because all the Worker Gateways, where your users access your APIs, can be configured and run independently. 
If the MDCB link back to the Management Gateway goes down, the Workers will continue to service API requests; when the link is back up, MDCB will automatically refresh the Workers with any changes they missed. + +Multi Data Center Bridge is down + +What happens if the worst happens and Worker Gateways fail while the link to the Control Plane is down? We’ve thought of that: Tyk will automatically configure the new Workers that spin up using the last known set of API resources in the worker’s cluster, minimizing the impact on availability of your services. + +### When might you deploy MDCB? + +#### Managing geographically distributed gateways to minimize latency and protect data sovereignty + +Consider Acme Global Bank: they have customers in the USA and the EU. Due to compliance, security and performance requirements they need to deploy their Tyk API Gateways locally in each of those regions. They need to manage the deployment and synchronisation of APIs and associated resources (e.g. keys, policies and certificates) between the data centers to ensure global service for their customers. + +Acme Global Bank without MDCB + +Tyk MDCB enables Acme Global Bank to power this architecture by creating a primary data center with all the Tyk Control Plane components and secondary (worker) data centers that act as local caches to run validation and rate limiting operations to optimize latency and performance. + +Acme Global Bank with MDCB + +#### Managing a complex deployment of services with internal and externally facing APIs + +Consider Acme Telecoms: they have a large nationally distributed workforce and complex self-hosted IT systems; are using Tyk API Gateways to deploy internal and external APIs; and have different teams managing and consuming different sets of APIs across multiple sites. They need to ensure data segregation, availability, and access for internal and external users and partners. 
+ +Acme Telecoms without MDCB + +Combining Tyk’s built-in multi-tenancy capability with MDCB enables Acme Telecoms to set up dedicated logical gateways for different user groups and different physical gateways to guarantee data segregation, with a single management layer for operational simplicity. + +Acme Telecoms with MDCB + +### Why Choose MDCB for Your API Infrastructure? + +Beyond the two usage scenarios described here, there are many others where MDCB will provide you with the power and flexibility you need to manage your own particular situation. + +Here are some examples of the benefits that deploying Tyk MDCB can bring: + +#### Flexible architecture + +- You can control geographic distribution of traffic, restricting traffic to data centers/regions of your choice. +- You can put your Tyk API Gateways close to users, but still have a single management layer. +- You have a single, simple, point of access for configuration of your complex API infrastructure and yet deploy multiple Developer Portals, if required, to provide access to different user groups (e.g. Internal and External). +- You can physically [segment teams and environments](/api-management/multiple-environments#gateway-sharding) within a single physical data center, giving each team full control of its own API gateway and server resources without the noisy neighbors you might experience in a standard self-managed deployment. +- You can deploy gateways with whichever mix of cloud vendors you wish. +- You can mix and match cloud and on premises data centers. + +#### Improved resiliency, security and uptime + +- Each Data Plane (Worker) Gateway operates autonomously using a locally stored copy of the API resources it needs. +- The Control Plane (Management) Gateway maintains synchronisation of these configurations across your Tyk deployment via the MDCB backbone link. 
+- If the Management Gateway or MDCB backbone fails, the Workers will continue to handle API requests, rejecting only new authorization tokens created on other Gateways. When connectivity is restored, the Worker Gateways will hot-reload to fetch any updated configurations (e.g. new authorization tokens) from the Control Plane. +- If a Worker Gateway fails, this does not impact the operation of the others: when it comes back online, if it is unable to contact the Control Plane, it will retrieve the β€œlast good” configuration held locally. +- The MDCB backbone runs on a resilient compressed RPC channel that is designed to handle ongoing and unreliable connectivity; all traffic on the backbone is encrypted and so safer to use over the open internet or inter-data center links. +- Improved data security through separation of traffic into completely separate clusters within your network. + +#### Reduced latency + +- Deploying Data Plane (Worker) Gateways close to your geographically distributed API consumers helps reduce their perceived request latency. +- Deploying Worker Gateways close to your backend services will minimize round trip time servicing API requests. +- The Worker Gateways cache keys and other configuration locally, so all operations can be geographically localised. +- All traffic to and from one Gateway cluster will have rate limiting, authentication and authorization performed within the data center rather than β€œcalling home” to a central control point; this reduces the API request round trip time. + +#### Improved Infrastructure Management + +- Due to the shared control plane, all Worker Gateways report into a single Tyk Dashboard. This provides a simple, consistent place to manage your APIM deployment. +- This allows a shared infra team to offer API management and API Gateways as a service, globally, across multiple clouds and Self-Managed regions, from a single pane of glass. 
+ +#### Next Steps + +- [The components of an MDCB deployment](/api-management/mdcb#mdcb-components) +- [Run an MDCB Proof of Concept](/api-management/mdcb#minimizing-latency-with-mdcb) +- [MDCB reference guide](/tyk-multi-data-centre/mdcb-configuration-options) + +## MDCB Components + +### Overview + +Here we will give an overview of the main elements of a Tyk Multi Data Center (distributed) solution, clarifying the terminology used by Tyk. +A Tyk Multi Data Center Bridge deployment + +#### Tyk Gateway +- The workhorse of any deployment, Tyk’s lightweight Open Source API gateway that exposes your APIs for consumption by your users. It is a reverse proxy that secures your APIs, manages session and policies, monitors, caches and manipulates requests/responses when needed before/after it proxies them to and from the upstream. + +#### Tyk Dashboard +- Tyk’s management platform used to control the creation of API configurations, policies and keys in a persistent manner. It provides analytic information on the traffic the Gateways have processed which includes aggregated API usage and detailed information per transaction. + +#### Tyk Multi Data Center Bridge (MDCB) +- The backbone of the distributed Tyk deployment, connecting the distributed Data Plane deployments back to the Control Plane. + +#### Tyk Pump +- Tyk’s open source analytics purger that can be used to export transaction logs from the Tyk deployment to the visualisation tool or other data store of your choice + +#### Tyk Developer Portal +- The access point for your API Consumers where you publish your API catalog(s) and they obtain API keys. + +#### Redis +- An in-memory data store used as a database, cache and message broker. We use it as pub/sub broker for inter-Gateway communication, and as a cache for API configurations, keys, certificates, and temporary store for analytics records. 
+ +#### MongoDB/SQL +- A persistent data store for API configurations, policies, analytics and aggregated analytics, Dashboard organizations, configurations, dashboard users, portal developers and configuration. + + +### Control Plane +The Tyk Control Plane + +The Control Plane must consist of the following elements: +- **Tyk Dashboard** (used to configure and control the whole Tyk installation) +- **Tyk Gateway** (used for creation of keys and certificates, this does not service API requests; it is important to ensure there is no public access to it and it must not be sharded (tagged) as it "belongs" to the whole Tyk installation) +- **Tyk MDCB** +- **Redis** (high availability Redis data store that should be backed up in case of failure; this [document](https://redis.io/docs/management/persistence/) gives recommendation on Redis persistency) +- **MongoDB or SQL** (a persistent data store that should be deployed and set up for redundancy and high availability) + +To improve resilience and availability, multiple instances of each Tyk component should be deployed and load balanced within the Control Plane. + +#### Optional Components +- One or more **Tyk Pumps** can be deployed within the Control Plane to export analytics data (request/response logs) to your [data sink of choice](/api-management/tyk-pump#external-data-stores) for further analytics and visualisation. +- A **Tyk Developer Portal** can be added to enhance the end-user experience when accessing your APIs. + +### Data Plane +The Tyk Data Plane + +The Data Plane deployment must consist of the following elements: +- **Tyk Gateway** (one or more Gateways specifically configured as Workers) +- **Redis** (a single Redis data store shared by all Gateways in the cluster) + +To provide resilience and availability, multiple Gateways should be deployed and load balanced within the cluster. 
+If you want this Data Plane deployment to be resilient, available, and independent from the Control Plane during a disconnection event, it is advised to make the Redis data store persistent. + +#### Optional Components +- A **Tyk Pump** specifically configured as a [Hybrid Pump](/product-stack/tyk-charts/tyk-data-plane-chart#hybrid-pump) can be deployed with the Data Plane gateways to export analytics data (request/response logs) to your [data sink of choice](/api-management/tyk-pump#external-data-stores) for further analytics and visualisation. + +## Setup MDCB Control Plane + +The [Tyk control plane](/api-management/mdcb#control-plane) contains all the +standard components of a standard Tyk Self-Managed installation with the addition of the Multi Data Center Bridge (MDCB). + +### Installing MDCB Component On Linux +The MDCB component must be able to connect to Redis and MongoDB/PostgreSQL directly from within the Control Plane deployment. It does not require access to the Tyk Gateway(s) or Dashboard application. + +The MDCB component will however, by default, expose an RPC service on port 9091, to which the [Tyk Data Plane](/api-management/mdcb#data-plane) data centers, i.e. the worker gateway(s) that serves API traffic, will need connectivity. + +#### Prerequisites +We will assume that your account manager has provided you with a valid MDCB and Dashboard License and the command to enable you to download the MDCB package. +We will assume that the following components are up and running in your Controller DC: + +* MongoDB or SQL (check [supported versions](/planning-for-production/database-settings)) +* Redis (check [supported versions](/tyk-self-managed/install#redis)) +* Tyk Dashboard +* Tyk Gateway / Gateways Cluster +* Working Tyk-Pro [Self-Managed installation](/tyk-self-managed/install) + + + + + When using SQL rather than MongoDB in a production environment, we only support PostgreSQL. 
+ + + +#### Installing using RPM and Debian packages +To download the relevant MDCB package from PackageCloud: + +```curl +curl -s https://packagecloud.io/install/repositories/tyk/tyk-mdcb-stable/script.deb.sh | sudo bash +``` + +```curl +curl -s https://packagecloud.io/install/repositories/tyk/tyk-mdcb-stable/script.rpm.sh | sudo bash +``` + +After the relevant script for your distribution has run, the script will let you know it has finished with the following message: + +`The repository is setup! You can now install packages.` + +You will now be able to install MDCB as follows: + +```curl +sudo apt-get install tyk-sink +``` + +Or + +```curl +sudo yum install tyk-sink +``` + +### Installing in a Kubernetes Cluster with our Helm Chart + +The [Tyk Control Plane](/product-stack/tyk-charts/tyk-control-plane-chart) helm chart is pre-configured to install Tyk control plane for multi data center API management from a single Dashboard with the MDCB component. + +Below is a concise instruction on how to set up an MDCB Control Plane with Redis and PostgreSQL. + +To access the comprehensive installation instructions and configuration options, please see [Tyk Control Plane Helm Chart](/product-stack/tyk-charts/tyk-control-plane-chart). + +#### Prerequisites +- [Kubernetes 1.19+](https://kubernetes.io/docs/setup/) +- [Helm 3+](https://helm.sh/docs/intro/install/) +- MDCB and Dashboard license + +#### Quick Start + +1. **Setup required credentials** + + First, you need to provide Tyk Dashboard and MDCB license, admin email and password, and API keys. We recommend to store them in secrets. 
+ + ```bash + NAMESPACE=tyk-cp + + API_SECRET=changeit + ADMIN_KEY=changeit + ADMIN_EMAIL=admin@default.com + ADMIN_PASSWORD=changeit + DASHBOARD_LICENSE=changeit + MDCB_LICENSE=changeit + SECURITY_SECRET=changeit + OPERATOR_LICENSE=changeit + + kubectl create namespace $NAMESPACE + + kubectl create secret generic my-secrets -n $NAMESPACE \ + --from-literal=APISecret=$API_SECRET \ + --from-literal=AdminSecret=$ADMIN_KEY \ + --from-literal=DashLicense=$DASHBOARD_LICENSE \ + --from-literal=OperatorLicense=$OPERATOR_LICENSE + + kubectl create secret generic mdcb-secrets -n $NAMESPACE \ + --from-literal=MDCBLicense=$MDCB_LICENSE \ + --from-literal=securitySecret=$SECURITY_SECRET + + kubectl create secret generic admin-secrets -n $NAMESPACE \ + --from-literal=adminUserFirstName=Admin \ + --from-literal=adminUserLastName=User \ + --from-literal=adminUserEmail=$ADMIN_EMAIL \ + --from-literal=adminUserPassword=$ADMIN_PASSWORD + ``` + +2. **Install Redis (if you don't already have Redis installed)** + + If you do not already have Redis installed, you may use these charts provided by Bitnami. + + ```bash + helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --install --version 19.0.2 + ``` + Follow the notes from the installation output to get connection details and password. The DNS name of your Redis as set by Bitnami is `tyk-redis-master.tyk-cp.svc:6379` (Tyk needs the name including the port) + + The Bitnami chart also creates a secret `tyk-redis` which stores the connection password in `redis-password`. We will make use of this secret in installation later. + + + +Ensure that you are installing Redis versions that are supported by Tyk. Please consult the list of [supported versions](/tyk-self-managed/install#redis) that are compatible with Tyk. + + + +3. **Install PostgreSQL (if you don't already have PostgreSQL installed)** + + If you do not already have PostgreSQL installed, you may use these charts provided by Bitnami. 
+ + ```bash + helm upgrade tyk-postgres oci://registry-1.docker.io/bitnamicharts/postgresql --set "auth.database=tyk_analytics" -n $NAMESPACE --install --version 14.2.4 + ``` + + Follow the notes from the installation output to get connection details. + + We require the PostgreSQL connection string for Tyk installation. This can be stored in a secret and will be used in installation later. + + ```bash + POSTGRESQLURL=host=tyk-postgres-postgresql.$NAMESPACE.svc\ port=5432\ user=postgres\ password=$(kubectl get secret --namespace $NAMESPACE tyk-postgres-postgresql -o jsonpath="{.data.postgres-password}" | base64 -d)\ database=tyk_analytics\ sslmode=disable + + kubectl create secret generic postgres-secrets -n $NAMESPACE --from-literal=postgresUrl="$POSTGRESQLURL" + ``` + + + +Ensure that you are installing PostgreSQL versions that are supported by Tyk. Please consult the list of [supported versions](/api-management/dashboard-configuration#supported-database) that are compatible with Tyk. + + + +4. **Install Tyk Control Plane** + + ```bash + helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ + + helm repo update + + helm upgrade tyk-cp tyk-helm/tyk-control-plane -n $NAMESPACE \ + --install \ + --set global.adminUser.useSecretName=admin-secrets \ + --set global.secrets.useSecretName=my-secrets \ + --set tyk-mdcb.mdcb.useSecretName=mdcb-secrets \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password \ + --set global.postgres.connectionStringSecret.name=postgres-secrets \ + --set global.postgres.connectionStringSecret.keyName=postgresUrl + ``` + +5. **Done!** + + Now Tyk Dashboard and Tyk MDCB should be accessible through service `dashboard-svc-tyk-cp-tyk-dashboard` at port `3000` and `mdcb-svc-tyk-cp-tyk-mdcb` at port `9091` respectively. You can login to Dashboard using the admin email and password to start managing APIs. 
+ + You can use the MDCB connection details included in the installation output, to install the [MDCB Data Plane](/api-management/mdcb#setup-mdcb-data-plane). + +### Configuration +If you install MDCB component with package, modify your `/opt/tyk-sink/tyk_sink.conf` file as follows: + +#### Configuration Example +```json +{ + "listen_port": 9091, + "healthcheck_port": 8181, + "server_options": { + "use_ssl": false, + "certificate": { + "cert_file": "", + "key_file": "" + }, + "min_version": 771 + }, + "storage": { + "type": "redis", + "host": "localhost", + "port": 6379, + "username": "", + "password": "", + "enable_cluster": false, + "redis_use_ssl": false, + "redis_ssl_insecure_skip_verify": false + }, + "basic-config-and-security/security": { + "private_certificate_encoding_secret": "" + }, + "hash_keys": true, + "forward_analytics_to_pump": true, + "ignore_tag_prefix_list": [ + + ], + "analytics": { + "mongo_url": "mongodb://localhost/tyk_analytics", + "mongo_use_ssl": false, + "mongo_ssl_insecure_skip_verify": false + }, + "license": "MDCB_LICENSE_KEY" +} +``` + + + +From MDCB 2.0+, you can choose between Mongo or SQL databases to setup your `analytics` storage. In order to setup your PostgreSQL storage, you can use the same configuration from your [Tyk Dashboard main storage](/planning-for-production/database-settings#postgresql). + +For example, to set up a `postgres` storage the `analytics` configurations would be: + +```json +{ +... + ... + "analytics": { + "type": "postgres", + "connection_string": "user=postgres_user password=postgres_password database=dbname host=potgres_host port=postgres_port", + "table_sharding": false + }, +} +``` +This storage will work for fetching your organization data (APIs, Policies, etc) and for analytics. 
+ + + +You should now be able to start the MDCB service, check that it is up and running and ensure that the service starts on system boot: + +```console +sudo systemctl start tyk-sink +``` + + +```console +sudo systemctl enable tyk-sink +``` + +### Health check + +It is possible to perform a health check on the MDCB service. This allows you to determine if the service is running, so is useful when using MDCB with load balancers. + +Health checks are available via the HTTP port. This is defined by `http_port` configuration setting and defaults to `8181`. Do **not** use the standard MDCB listen port (`listen_port`) for MDCB health checks. + +From MDCB v2.7.0, there are 2 health check services available: +1. `/liveness` endpoint returns a `HTTP 200 OK` response when the service is operational. +2. `/readiness` endpoint returns a `HTTP 200 OK` response when MDCB is ready to accept requests. It ensures that dependent components such as Redis and data store are connected, and the gRPC server is ready for connection. + +See [MDCB API](/tyk-mdcb-api) for details of the endpoints. + +In MDCB v2.6.0 or earlier, MDCB only offers one health check endpoint at `/health` via the port defined by the `healthcheck_port` configuration setting. The default port is `8181`. The `/health` endpoint is also available on v2.7.0 or later for backward compatibility. + +To use the health check service, call the `/health` endpoint i.e. `http://my-mdcb-host:8181/health`. This will return a `HTTP 200 OK` response if the service is running. + +Please note that an HTTP 200 OK response from the `/health` endpoint merely indicates that the MDCB service is operational. However, it is important to note that the service may not yet be ready for use if it is unable to establish a connection with its dependent components (such as Redis and Data store) or if they are offline. Upgrade to v2.7.0 and later to have more accurate health checking. 
+ +### Troubleshooting + +#### Check that the MDCB service is running + +```console +sudo systemctl status tyk-sink +``` + +Should Return: + +```console +tyk-sink.service - Multi Data Center Bridge for the Tyk API Gateway + + Loaded: loaded (/usr/lib/systemd/system/tyk-sink.service; enabled; vendor preset: disabled) + + Active: active (running) since Thu 2018-05-03 09:39:37 UTC; 3 days ago + Main PID: 1798 (tyk-sink) + + CGroup: /system.slice/tyk-sink.service + + └─1798 /opt/tyk-sink/tyk-sink -c /opt/tyk-sink/tyk_sink.conf +``` + +#### Check that MDCB is listening on port 9091 + +```console +sudo netstat -tlnp +``` + +Should Return: + +``` +Active Internet connections (only servers) +Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name +... +tcp6 0 0 :::9091 :::* LISTEN 1798/tyk-sink +``` + +#### Check the logs for MDCB + +```{.copyWrapper} +> sudo journalctl -u tyk-sink +``` + +Add the `-f` flag to follow the log. The command should return output similar to this: + +```console +-- Logs begin at Thu 2018-05-03 09:30:56 UTC, end at Mon 2018-05-07 08:58:23 UTC. -- +May 06 11:50:37 master tyk-sink[1798]: time="2018-05-06T11:50:37Z" level=info msg="RPC Stats:{\"RPCCalls\":0,\"RPCTime\":0,\"Byte +May 06 11:50:38 master tyk-sink[1798]: time="2018-05-06T11:50:38Z" level=info msg="RPC Stats:{\"RPCCalls\":0,\"RPCTime\":0,\"Byte +... +May 06 11:50:42 master tyk-sink[1798]: time="2018-05-06T11:50:42Z" level=info msg="Ping!" +``` + +#### Check MDCB configurations + +From MDCB v2.7.0, a secured HTTP endpoint `/config` can be enabled that allows you to query configuration of MDCB. + +To enable the secured HTTP endpoint, make sure you have the following in your `/opt/tyk-sink/tyk_sink.conf` config file. + +```json +{ + "security": { + "enable_http_secure_endpoints": true, + "secret": "" + }, + "http_server_options": { + "use_ssl": true, + "certificate": { + "cert_file": ..., + "key_file": ..., + "min_version": ... 
+ } + } +} +``` + +Subsequently, you can issue a request to the `/config` endpoint to return a json representation of your MDCB config: + +```bash +curl -H "x-tyk-authorization: " https://my-mdcb-host:8181/config +``` + +Alternatively, you can issue a request to the `/env` endpoint to return your MDCB config in the form of environment variables settings: + +```bash +curl -H "x-tyk-authorization: " https://my-mdcb-host:8181/env +``` + +### Enabling MDCB on Organization Object on Tyk Dashboard + +Before a worker gateway can connect to MDCB, it is important to enable the organization that owns all the APIs to be distributed to be allowed to utilize Tyk MDCB. To do this, the organization record needs to be modified with two flags using the [Tyk Dashboard Admin API](/dashboard-admin-api). + +To make things easier, we will first set a few [environment variables](/tyk-oss-gateway/configuration): + +1. `export DASH_ADMIN_SECRET=` + +You can find `` in `tyk_analytics.conf` file under `admin_secret` field or `TYK_DB_ADMINSECRET` environment variable. + +2. `export DASH_URL=` + +This is the URL you use to access the Dashboard (including the port if not using the default port). + +3. `export ORG_ID=` + +You can find your organization id in the Dashboard, under your user account details. + +Org ID + +4. Send a GET request to the Dashboard API to `/admin/organisations/$ORG_ID` to retrieve the organization object. In the example below, we are redirecting the output json to a file `myorg.json` for easy editing. + +```curl +curl $DASH_URL/admin/organisations/$ORG_ID -H "Admin-Auth: $DASH_ADMIN_SECRET" | python -mjson.tool > myorg.json +``` +5. Open `myorg.json` in your favorite text editor and add the following fields as follows. +New fields are between the `...` . + +```json {linenos=table,hl_lines=["5-12"],linenostart=1} +{ + "_id": "55780af69b23c30001000049", + "owner_slug": "portal-test", + ... 
+ "hybrid_enabled": true, + "event_options": { + "key_event": { + "webhook": "https://example.com/webhook", + "email": "user@example.com", + "redis": true + }, + }, + ... + "apis": [ + { + "api_human_name": "HttpBin (again)", + "api_id": "2fdd8512a856434a61f080da67a88851" + } + ] +} +``` + +In the example above it can be seen that the `hybrid_enabled` and `event_options` configuration fields have been added: + +- `hybrid_enabled:` Allows a worker gateway to login as an organization member into MDCB. +- `event_options:` The `event_options` object is optional. By default the update and removal of Redis keys (hashed and unhashed), API Definitions and policies are propagated to various instance zones. The `event_options` object contains a `key_event` object that allows configuration of the following additional features: + + - event notification mechanism for all Redis key (hashed and unhashed) events. Events can be notified via webhook by setting the `webhook` property to the value of the webhook URL. Similarly, events can be notified via email by setting the `email` property to the value of the target email address. + - enable propagation of events for when an OAuth token is revoked from Dashboard by setting the `redis` property to `true`. + + The `event_options` in the example above enables the following functionality: + + - events are propagated when OAuth tokens are revoked from Dashboard since `redis` is `true` + - events associated with Redis keys (hashed and unhashed) and revoking OAuth tokens via Dashboard are sent to webhook `https://example.com/webhook` and email address `user@example.com` + +6. Update your organization with a PUT request to the same endpoint, but this time, passing in your modified `myorg.json` file. 
+ +```curl +curl -X PUT $DASH_URL/admin/organisations/$ORG_ID -H "Admin-Auth: $DASH_ADMIN_SECRET" -d @myorg.json +``` + +This should return: + +```json +{"Status":"OK","Message":"Org updated","Meta":null} +``` + +## Setup MDCB Data Plane + +You may configure an unlimited number of [Tyk Data Planes](/api-management/mdcb#data-plane) containing Worker Gateways for ultimate High Availability (HA). We recommend that you deploy your worker gateways as close to your upstream services as possible in order to reduce latency. + +It is a requirement that all your Worker Gateways in a Data Plane data center share the same Redis DB in order to take advantage of Tyk's DRL and quota features. +Your Data Plane can be in the same physical data center as the Control Plane with just a logical network separation. If you have many Tyk Data Planes, they can be deployed in a private-cloud, public-cloud, or even on bare-metal. + +### Installing in a Kubernetes Cluster with our Helm Chart + +The [Tyk Data Plane](/product-stack/tyk-charts/tyk-data-plane-chart) helm chart is pre-configured to install Tyk Gateway and Tyk Pump that connects to MDCB or Tyk Cloud, our SaaS MDCB Control Plane. After setting up Tyk Control Plane with Helm Chart, obtain the required connection details from installation output and configure data plane chart as below. For Tyk Cloud users, follow the [Tyk Cloud instructions](/tyk-cloud/environments-deployments/hybrid-gateways) to deploy your hybrid gateways. + +#### Prerequisites + +* [Kubernetes 1.19+](https://kubernetes.io/docs/setup/) +* [Helm 3+](https://helm.sh/docs/intro/install/) +* Connection details to remote control plane from the tyk-control-plane installation output. 
+ +The following quick start guide explains how to use the [Tyk Data Plane Helm chart](/product-stack/tyk-charts/tyk-data-plane-chart) to configure Tyk Gateway that includes: +- Redis for key storage +- Tyk Pump to send analytics to Tyk Control Plane and Prometheus + +At the end of this quickstart Tyk Gateway should be accessible through service `gateway-svc-tyk-dp-tyk-gateway` at port `8080`. Pump is also configured with Hybrid Pump which sends aggregated analytics to MDCB, and Prometheus Pump which exposes metrics locally at `:9090/metrics`. + +1. **Set connection details** + + Set the below environment variables and replace values with connection details to your MDCB control plane. See [Tyk Data Plane](/product-stack/tyk-charts/tyk-data-plane-chart#obtain-remote-control-plane-connection-details-from-tyk-control-plane-chart) documentation on how to get the connection details. + + ```bash + USER_API_KEY=9d20907430e440655f15b851e4112345 + ORG_ID=64cadf60173be90001712345 + MDCB_CONNECTIONSTRING=mdcb-svc-tyk-cp-tyk-mdcb.tyk-cp.svc:9091 + GROUP_ID=your-group-id + MDCB_USESSL=false + ``` + +2. 
**Then use Helm to install Redis and Tyk** + + ```bash + NAMESPACE=tyk-dp + APISecret=foo + + helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ + helm repo update + + helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --create-namespace --install + + helm upgrade tyk-dp tyk-helm/tyk-data-plane -n $NAMESPACE --create-namespace \ + --install \ + --set global.remoteControlPlane.userApiKey=$USER_API_KEY \ + --set global.remoteControlPlane.orgId=$ORG_ID \ + --set global.remoteControlPlane.connectionString=$MDCB_CONNECTIONSTRING \ + --set global.remoteControlPlane.groupID=$GROUP_ID \ + --set global.remoteControlPlane.useSSL=$MDCB_USESSL \ + --set global.secrets.APISecret="$APISecret" \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc.cluster.local:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password + ``` + +3. **Done!** + + Now Tyk Gateway should be accessible through service `gateway-svc-tyk-dp-tyk-gateway` at port `8080`. Pump is also configured with Hybrid Pump which sends aggregated analytics to MDCB, and Prometheus Pump which exposes metrics locally at `:9090/metrics`. + + For the complete installation guide and configuration options, please see [Tyk Data Plane Chart](/product-stack/tyk-charts/tyk-data-plane-chart). + +### Configuring an existing Tyk Gateway +If you have Redis and a working Tyk Gateway deployed, follow the steps below to configure your gateways to work in RPC mode. + + + +If you have deployed Gateway with `tyk-data-plane` Chart, you don't need to go through the following steps to configure Tyk Gateway. The necessary configurations have been set in `tyk-data-plane` chart templates. 
+ + + +#### Prerequisites +- Redis +- A working headless/open source Tyk Gateway deployed + +#### Worker Gateway Configuration + +Modify the Tyk Gateway configuration (`tyk.conf`) as follows: +`"use_db_app_configs": false,` + +Next, we need to ensure that the policy loader and analytics pump use the RPC driver: + +```{.json} +"policies": { + "policy_source": "rpc", + "policy_record_name": "tyk_policies" +}, +"analytics_config": { + "type": "rpc", + ... // remains the same +}, +``` + +Lastly, we add the sections that enforce the Worker mechanism: + +```{.json} +"slave_options": { + "use_rpc": true, + "rpc_key": "{ORGID}", + "api_key": "{APIKEY}", + "connection_string": "{MDCB_HOSTNAME:9091}", + "enable_rpc_cache": true, + "bind_to_slugs": false, + "group_id": "{ny}", + "use_ssl": false, + "ssl_insecure_skip_verify": true +}, +"auth_override": { + "force_auth_provider": true, + "auth_provider": { + "name": "", + "storage_engine": "rpc", + "meta": {} + } +} +``` + + +If you set `analytics_config.type` to `rpc` - make sure you don't have your Tyk Pump configured to send analytics via the `hybrid` Pump type. + + + + +As an optional configuration you can use `key_space_sync_interval` to set the period's length in which the gateway will check for changes in the key space; if this value is not set then by default it will be 10 seconds. + + +The most important elements here are: + +| Field | Description | +| :--------------- | :---------------- | +|`api_key` |This is the API key of a user used to authenticate and authorize the Gateway's access through MDCB. The user should be a standard Dashboard user with minimal privileges so as to reduce risk if compromised. The suggested security settings are `read` for `Real-time notifications` and the remaining options set to `deny`.| +|`group_id` |This is the "zone" that this instance inhabits, e.g. the cluster/data center the gateway lives in. 
The group ID must be the same across all the gateways of a data center/cluster which are also sharing the same Redis instance. This ID should also be unique per cluster (otherwise another gateway's cluster can pick up your keyspace events and your cluster will get zero updates). +|`connection_string` |The MDCB instance or load balancer.| +|`bind_to_slugs` | For all Tyk installations except for Tyk Classic Cloud this should be set to false.| + +Once this is complete, you can restart the Tyk Gateway in the Data Plane, and it will connect to the MDCB instance, load its API definitions, and be ready to proxy traffic. + +## Minimizing latency with MDCB + +### Overview + +As described [previously](/api-management/mdcb#managing-geographically-distributed-gateways-to-minimize-latency-and-protect-data-sovereignty), Acme Global Bank has operations and customers in both the EU and USA. + +To decrease the latency in response from their systems and to ensure that data remains in the same legal jurisdiction as the customers (data residency), they have deployed backend (or, from the perspective of the API gateway, "upstream") services in two data centers: one in the US, the other in the EU. + +Without a dedicated solution for this multi-region use case, Acme Global Bank would deploy a Tyk Gateway cluster in each data center and then have the operational inconvenience of maintaining two separate instances of Tyk Dashboard to configure, secure and publish the APIs. + +By using Tyk's Multi-Data Center Bridge (MDCB), however, they are able to centralise the management of their API Gateways and gain resilience against failure of different elements of the deployment - data or control plane - improving the availability of their public APIs. + +In this example we will show you how to create the Acme Global Bank deployment using our example Helm charts. 
+ +MDCB Proof of Concept - Acme Global Bank + +**Step-by-step instructions to deploy the Acme Global Bank scenario with Kubernetes in a public cloud (here we've used Google Cloud Platform):** + +### Pre-requisites and configuration + +1. What you need to install/set-up + - Tyk Pro license (Dashboard and MDCB keys - obtained from Tyk) + - Access to a cloud account of your choice, e.g. GCP + - You need to grab this Tyk Demo repository: [GitHub - TykTechnologies/tyk-k8s-demo](https://github.com/TykTechnologies/tyk-k8s-demo) + - You need to install `helm`, `jq`, `kubectl` and `watch` + +2. To configure GCP + - Create a GCP cluster + - Install the Google Cloud SDK + - Install `gcloud` + - `./google-cloud-sdk/install.sh` + - Configure the Google Cloud SDK to access your cluster + - `gcloud auth login` + - `gcloud components install gke-gcloud-auth-plugin` + - `gcloud container clusters get-credentials <> --zone <> --project <>` + - Verify that everything is connected using `kubectl` + - `kubectl get nodes` + +3. You need to configure the Tyk build + - Create a `.env` file within tyk-k8s-demo based on the provided `.env.example` file + - Add the Tyk license keys to your `.env`: + - `LICENSE=` + - `MDCB_LICENSE=` + +### Deploy Tyk Stack to create the Control and Data Planes + +1. Create the Tyk Control Plane + - `./up.sh -r redis-cluster -e load-balancer tyk-cp` + +*Deploying the Tyk Control Plane* +Tyk Control Plane Deployed + +2. 
Create two logically-separate Tyk Data Planes (Workers) to represent Acme Global Bank's US and EU operations using the command provided in the output from the `./up.sh` script: + - `TYK_WORKER_CONNECTIONSTRING= TYK_WORKER_ORGID= TYK_WORKER_AUTHTOKEN= TYK_WORKER_USESSL=false ./up.sh --namespace tyk-worker` + +Note that you need to run the same command twice, once setting the namespace to `tyk-worker-us`, the other to `tyk-worker-eu` (or namespaces of your choice). + +*Deploying the `tyk-worker-us` namespace (Data Plane #1)* +Deploying the tyk-worker-us namespace + +*Deploying the `tyk-worker-eu` namespace (Data Plane #2)* +Deploying the tyk-worker-eu namespace + +3. Verify and observe the Tyk Control and Data Planes + - Use `curl` to verify that the gateways are alive by calling their `/hello` endpoints + +observe Tyk K8s namespace console output + + - You can use `watch` to observe each of the Kubernetes namespaces + +*`tyk-cp` (Control Plane)* +Control Plane + +*`tyk-worker-us` (Data Plane #1)* +Data Plane #1 + +*`tyk-worker-eu` (Data Plane #2)* +Data Plane #2 + +### Testing the deployment to prove the concept +As you know, the Tyk Multi Data Center Bridge provides a link from the Control Plane to the Data Plane (worker) gateways, so that we can control all the remote gateways from a single dashboard. + +1. Access Tyk Dashboard + - You can log into the dashboard at the external IP address reported in the watch window for the Control Plane - in this example it was at `34.136.51.227:3000`, so just enter this in your browser + - The user name and password are provided in the `./up.sh` output: + - username: `default@example.com` + - password: `topsecretpassword` (or whatever you've configured in the `.env` file) + +Tyk Dashboard login + +2. Create an API in the dashboard, but don't secure it (set it to `Open - keyless`); for simplicity we suggest a simple pass-through proxy to `httpbin.org`. +3. 
MDCB will propagate this API through to the workers - so try hitting that endpoint on the two Data Plane gateways (their addresses are given in the watch windows: for example `34.173.240.149:8081` for my `tyk-worker-us` gateway above). +4. Now secure the API from the dashboard using the Authentication Token option. You’ll need to set a policy for the API and create a key. +5. If you try to hit the API again from the workers, you’ll find that the request is now rejected because MDCB has propagated out the change in authentication rules. Go ahead and add the Authentication key to the request header… and now you reach `httpbin.org` again. You can see in the Dashboard’s API Usage Data section that there will have been success and error requests to the API. +6. OK, so that’s pretty basic stuff, let’s show what MDCB is actually doing for you… reset the API authentication to be `Open - keyless` and confirm that you can hit the endpoint without the Authentication key from both workers. +7. Next we’re going to experience an MDCB outage - by deleting its deployment in Kubernetes: +
`kubectl delete deployment.apps/mdcb-tyk-cp-tyk-pro -n tyk` +8. Now there's no connection from the data plane to the control plane, but try hitting the API endpoint on either worker and you'll see that they continue serving your users' requests regardless of their isolation from the Control Plane. +9. Back on the Tyk Dashboard make some changes - for example, re-enable Authentication on your API, add a second API. Verify that these changes **do not** propagate through to the workers. +10. Now we'll bring MDCB back online with this command: +
`./up.sh -r redis-cluster -e load-balancer tyk-cp` +11. Now try hitting the original API endpoint from the workers - you’ll find that you need the Authorization key again because MDCB has updated the Data Planes with the new config from the Control Plane. +12. Now try hitting the new API endpoint - this will also have automatically been propagated out to the workers when MDCB came back up and so is now available for your users to consume. + +Pretty cool, huh? + +There’s a lot more that you could do - for example by deploying real APIs (after all, this is a real Tyk deployment) and configuring different Organization Ids for each Data Plane to control which APIs propagate to which workers (allowing you to ensure data localisation, as required by the Acme Global Bank). + +### Closing everything down +We’ve provided a simple script to tear down the demo as follows: +1. `./down.sh -n tyk-worker-us` +2. `./down.sh -n tyk-worker-eu` +3. `./down.sh` + +**Don’t forget to tear down your clusters in GCP if you no longer need them!** + +## Synchroniser feature with MDCB + +### Overview + +In order to process API requests the worker Gateways need resources such as API keys, certificates, and OAuth clients. To ensure high availability these resources need to be synchronised in worker Gateways. + +Prior to Tyk Gateway v4.1, the API keys, certificates and OAuth clients required by worker Gateways were synchronised from the controller Gateway on-demand. With Gateway v4.1 and MDCB v2.0.3 we introduced a new configurable option that user may choose to have proactive synchronisation of these resources to the worker Gateways when they start up. + +This change improves resilience in case the MDCB link or controller Gateway is unavailable, because the worker Gateways can continue to operate independently using the resources stored locally. 
There is also a performance improvement, with the worker Gateways not having to retrieve resources from the controller Gateway when an API is first called. + +Changes to keys, certificates and OAuth clients are still synchronised to the worker Gateways from the controller when there are changes and following any failure in the MDCB link. + +### How do worker Gateways get resources from the MDCB control plane? + +**Without Synchroniser** + +If [Synchroniser](/tyk-multi-data-centre/mdcb-configuration-options#sync_worker_configenabled) is disabled, the resources are pulled by the worker Gateways on-demand and not in advance. It means that first it checks if the resource lives in the local Redis and if it doesn't exist then it tries to pull it from the control plane to store it locally. + +Every time that a key is updated or removed the control plane emits a signal to all the cluster gateways to update the key accordingly. + +Considerations: +This introduces a single point of failure. When the MDCB or controller Gateway in the control plane fails then the worker Gateways are also affected. + +Without Synchroniser + +**With Synchroniser** + +If [Synchroniser](/tyk-multi-data-centre/mdcb-configuration-options#sync_worker_configenabled) is enabled, API keys, certificates and OAuth clients are synchronised and stored in the local Redis server in advance. When one of those resources is created, modified or deleted, a signal will be emitted which allows the worker Gateways to respond accordingly. The transmitted information includes type of resource, action (create, update, delete), if hashed (in the case of keys), and resource ID so the changes are applied in the worker Gateways accordingly. + +Considerations: +- Size of local Redis storage: If there are a lot of keys / resources to be synchronised this will increase the size of local Redis storage. The data stored in Redis, including keys, OAuth clients, and certificates, is passed to the Redis instance of each data plane. 
This is a characteristic of the synchronisation mechanism and occurs regardless of whether these elements are being actively used on a given data plane. Keep in mind that even if certain resources are not being utilized in a specific data plane, they are still stored and maintained in sync by the Multi Data Center Bridge (MDCB). Therefore, if your system has a large volume of keys, OAuth clients, and certificates, this could increase the storage requirements and network traffic between your data planes. It's essential to consider these factors when designing and scaling your system. +- Data residency: The synchronization of resources does not support the application of this feature to specific groups. Instead, all keys, oauth-clients, etc. will be propagated to all Redis instances in the worker Gateways, without any differentiation based on groups. This should be considered for those customers who have a single control plane but multiple clusters of worker Gateways connected. In this scenario, all Redis instances in the Worker Gateways will receive all the keys. This aspect should be taken into account if you have specific data residency requirements. + +With Synchroniser + +### Configuring the Synchroniser for Tyk Self Managed + + + +The synchroniser feature is disabled by default. To enable it, please configure both the worker Gateways and MDCB control plane accordingly. + + + +1. **Worker Gateway configuration** + + First, configure the worker Gateway to enable synchroniser: + + `"slave_options":{ "synchroniser_enabled":true }` + + Please see [Gateway configuration options](/tyk-oss-gateway/configuration#slave_optionssynchroniser_enabled) for reference. + + To configure how often the worker Gateways read signals from MDCB control plane: + + `"slave_options":{ "key_space_sync_interval": 10 }` + + It configures the interval (in seconds) that the worker Gateway will take to check if there are any changes. If this value is not set then it will default to 10 seconds. 
+ + Please see [Gateway configuration options](/tyk-oss-gateway/configuration#slave_optionskey_space_sync_interval) for reference. + + If you are running a cluster of Gateways, you must have a _GroupID_ configured for synchronisation to work properly and propagate keys. + + `"slave_options":{ "group_id": "FOOBAR" }` + + + Please see [Gateway configuration options](/tyk-oss-gateway/configuration#slave_optionsgroup_id) for reference + +2. **Control Plane configuration** + + Secondly, configure the control plane. The most simple configuration to enable this feature in the MDCB config file is: + + - MDCB: + + `"sync_worker_config":{ "enabled":true }` + + Please see [MDCB configuration options](/tyk-multi-data-centre/mdcb-configuration-options#sync_worker_config) for reference. + + If API keys were used and hash key is disabled, please also set these additional configurations for the following components: + + - MDCB: + + `"sync_worker_config":{ "enabled":true, "hash_keys": false }, "hash_keys": false` + + - Dashboard: + + `"hash_keys": false` + + - Controller Gateway: + + `"hash_keys": false` + + If certificates were used, please also set these additional configurations: + + - MDCB + + Set `"security.private_certificate_encoding_secret"` with the certificate encoding secret. This is required because MDCB would decode the certificate first before propagating it to worker gateways. The worker Gateways could encode the certificate with their own secret. + + Please see [MDCB configuration options](/tyk-multi-data-centre/mdcb-configuration-options#securityprivate_certificate_encoding_secret) for reference + +### Configuring the Synchroniser for Tyk Cloud + +Please [submit a support ticket](https://support.tyk.io/hc/en-gb) to us if you want to enable Synchroniser for your Tyk Cloud deployment. + +### Troubleshooting + +1. 
**How do I know when synchronisation happened?** + + You could check the MDCB log message to know about when synchronisation started and finished: + + ``` + Starting oauth clients sync worker for orgID... + Starting keys sync worker for orgID... + Starting keys sync worker for orgID... + + Sync APIKeys worker for orgID:... + Sync Certs worker for orgID:... + Sync oauth worker for orgID:... + ``` + +2. **Can I trigger a re-synchronisation?** + + Synchronisation will be triggered once the Time To Live (TTL) of a worker Gateway has expired. The default expiry duration is 3 minutes. The Time To Live (TTL) value can be set via [sync_worker_config.group_key_ttl](/tyk-multi-data-centre/mdcb-configuration-options#sync_worker_configgroup_key_ttl) diff --git a/api-management/migrate-from-tyk-classic.mdx b/api-management/migrate-from-tyk-classic.mdx new file mode 100644 index 000000000..ba88365d8 --- /dev/null +++ b/api-management/migrate-from-tyk-classic.mdx @@ -0,0 +1,197 @@ +--- +title: "Migrating from Tyk Classic APIs" +description: "API Migration: Converting Tyk Classic APIs to Tyk OAS Format" +keywords: "Tyk OAS API, Tyk Classic API, Migrate, Convert, Migration, Tyk Classic, Tyk OAS, API definition" +sidebarTitle: "Migrate from Tyk Classic APIs" +--- + +## Overview + +From Tyk 5.8.0, you can convert your existing [Tyk Classic APIs](/api-management/gateway-config-tyk-classic) to the recommended [Tyk OAS API](/api-management/gateway-config-tyk-oas) format. + +The API Migration feature provides a powerful way to convert your existing Tyk Classic API definitions to the newer Tyk OAS format with various options to ensure a smooth transition. We've built support into the Tyk Dashboard's [API Designer](/api-management/migrate-from-tyk-classic#using-the-api-designer) to convert individual Tyk Classic APIs one by one. 
The Tyk Dashboard API [migrate endpoint](/api-management/migrate-from-tyk-classic#using-the-migration-api) allows you to migrate individual APIs or perform bulk migrations. + +### Features of Tyk Dashboard's Migration Feature + +- **Flexibility**: Migrate APIs using the API Designer or using the Tyk Dashboard API +- **Bulk Migration**: Convert multiple APIs in a single operation via the Dashboard API +- **Risk Mitigation**: Test migrations before applying changes +- **Phased Implementation**: Stage and test migrations before final deployment +- **Detailed Reporting**: Get comprehensive success, failure, and skip information + +## Migration Modes + +Tyk Dashboard supports four different migration modes to suit your needs: + +1. **Dry Run Mode**: Simulates the conversion process without making changes. This mode returns the converted API definitions in Tyk OAS format for your review, allowing you to verify the conversion before committing to it. + +2. **Stage Mode**: Perform a phased conversion by creating staged copies of your APIs in Tyk OAS format alongside the existing Tyk Classic APIs. You can then thoroughly test that the migrated APIs will behave as expected without affecting production traffic. When you are happy, you can **promote** the staged APIs. + + The **staged** API will be the same as the original Tyk Classic, with the following modifications to allow it to coexist: + + - The API ID will have the `staging-` prefix added + - `[staged] ` prefix will be added to the API name + - The listen path will be modified by the addition of the `/tyk-staging` prefix + - A reference will be added to link the staged API to the original Tyk Classic API's ID + +3. **Promote Mode**: Promote previously staged APIs, replacing their Tyk Classic counterparts. This process removes the staging prefixes from ID, name, and listen path. 
It then replaces the original Tyk Classic API with the Tyk OAS version - the Tyk OAS API will inherit both the API ID and database ID of the original API. + +4. **Direct Mode**: Directly migrates APIs from Tyk Classic to Tyk OAS format without staging; typically this will be used if testing was performed on a **dry run** copy of the original. This mode will also replace the original Tyk Classic API with the Tyk OAS version - the Tyk OAS API will inherit both the API ID and database ID of the original API. + + + + + Note that both Promote and Direct operations are destructive. The converted API will replace the original in the API database, inheriting both API Id and database ID. + + + +## Using the API Designer + +The API Designer provides a simple interface for migrating your Tyk Classic APIs to Tyk OAS, offering both **staged** and **direct** conversions. + +1. **Back Up Your APIs:** + + First ensure that you have taken a backup of your API, in case of accidental deletion - the direct and promote operations are destructive and will permanently delete the original Tyk Classic API definition from your database. You can export the API definition using the **Actions** menu or, if you want to backup all APIs you can do so via the Tyk Dashboard API by following [these instructions](/developer-support/upgrading#backup-apis-and-policies). + +2. **Start API Migration:** + + Find the API that you want to convert in the list of **Created APIs**. You can use the **Select API type** filter to show just Tyk Classic APIs. + + Applying a filter to see just the Tyk Classic APIs + + From the **Actions** menu select **Convert to Tyk OAS**. You can also find this option in the **Actions** menu within the Tyk Classic API Designer. + + Convert an API to Tyk OAS + + This will open the **Convert to Tyk OAS** mode selector. Choose whether to **stage** the conversion or perform a **direct** migration, then select **Convert API**. + + Choosing the migration path: staged or direct + +5. 
**Staging the API:** + + If you selected **stage** you'll be taken to the API Designer for the newly created staged Tyk OAS API. Note the prefixes that have been applied to the API name, API ID and API base path. + + The staged Tyk OAS API with prefixes + + For the staged API, you can now validate the configuration, make any required modifications and test that behavior is as expected. + +6. **Promote the Stage API:** + + When you are ready, you can promote the staged API to complete migration by selecting **Promote staged API** from the API Designer's **Actions** menu. Note that the **promote** option is not available from the API list, only within the API Designer for the specific API. This is to protect against accidentally promoting the wrong API. + + Promoting a staged API +
+ + Confirm the promotion +
+ + + +There is a known issue in Tyk 5.8.0 that the cancel button does not work in the promotion confirmation window, so once you have selected Promote staged API you will have to hit back in your browser if you do not want to complete the migration. This will be addressed in the next patch. + + + + +7. **Final Stage:** + + Following promotion, or if you selected **direct** conversion, you'll be taken to the API Designer for the newly created Tyk OAS API. Note that this has inherited the API ID from your Tyk Classic API and has replaced the Tyk Classic in your API database. + + Migration complete! + +## Using the Migration API + +You can use the Tyk Dashboard API to convert individual APIs, multiple API, or all APIs stored in your API storage database via the dedicated [/migrate](/api-management/migrate-from-tyk-classic#tyk-dashboard-api-migrate-endpoint) endpoint. + +### Tyk Dashboard API Migrate Endpoint + +`POST /api/apis/migrate` + +The payload for this request is: + +```json +{ + "mode": "dryRun", // Required: Migration mode (dryRun, stage, promote, direct) + "apiIDs": ["api123", "api456"], // List of API IDs to migrate (cannot be used with 'all') + "all": false, // Migrate all APIs (cannot be used with 'apiIDs') + "abortOnFailure": false, // Stop migration process on first failure + "overrideStaged": false // When mode is 'stage', overwrite existing staged APIs +} +``` + +- Indicate the migration [mode] using the following options: + + - `dryRun` + - `stage` + - `promote` + - `direct` + +- You can convert specific APIs by providing their API IDs in the `apiIDs` array or convert all your Tyk Classic APIs in one go using the `all` option. + +- Set `abortOnFailure` to `true` if you want Tyk to stop processing if it encounters a failure while operating on a batch of APIs. 
+ +- You can only have one **staged** version of a Tyk Classic API, so if you have already started the migration of an API and try again - for example after making changes to the original Tyk Classic API, the operation will fail. Use **overrideStaged** to delete the existing staged API and create a new one. + + +#### Example: Dry Run Migration + +``` +curl -X POST \ + https://your-tyk-dashboard/api/apis/migrate \ + -H "Authorization: your-dashboard-api-key" \ + -H "Content-Type: application/json" \ + -d '{ + "mode": "dryRun", + "apiIDs": ["api123", "api456"], + "abortOnFailure": false + }' +``` + +#### Example: Migrate All APIs + +``` +curl -X POST \ + https://your-tyk-dashboard/api/apis/migrate \ + -H "Authorization: your-dashboard-api-key" \ + -H "Content-Type: application/json" \ + -d '{ + "mode": "direct", + "all": true, + "abortOnFailure": false + }' +``` + +## Known Limitations of Migration + +There are some differences between the way Tyk Gateway works with Tyk Classic and Tyk OAS APIs, so you are advised to take the following into account and, if your API uses any of these features, to stage (or dry run) your migrations and ensure you perform appropriate testing on the interim versions. + +1. **Handling of Regular Expression Path Parameters** + + When migrating APIs with regex-based path parameters, be aware that: + + - In Tyk Classic, the Gateway only routes requests matching the regex pattern + - In Tyk OAS, the Gateway routes all requests matching the pattern structure by default + + Recommended action: Enable the [Validate Request](/api-management/traffic-transformation/request-validation#request-validation-using-tyk-oas) middleware in your migrated Tyk OAS API to maintain the same behavior. + +2. 
**Location of Mock Response Middleware** + + The position of mock response middleware in the request processing chain [differs between Tyk Classic and Tyk OAS](/api-management/traffic-transformation/mock-response#middleware-execution-order-during-request-processing): + + - In Tyk Classic, it appears at the start of the request processing chain (before authentication) + - In Tyk OAS, it appears at the end of the request processing chain + + During migration, the system automatically adds the [ignore authentication](/api-management/traffic-transformation/ignore-authentication#ignore-authentication-overview) middleware to endpoints with mock responses to maintain similar behavior. Note, however, that any other middleware configured for that endpoint or at the API level will be applied for the Tyk OAS API (which was not the case for the Tyk Classic API). + +3. **Enhanced Request Validation** + + Tyk OAS uses the more advanced [Validate Request](/api-management/traffic-transformation/request-validation#request-validation-using-tyk-oas) middleware, whereas Tyk Classic is limited to the [Validate JSON](/api-management/traffic-transformation/request-validation#request-validation-using-classic) middleware. The migration will configure Validate Request to check the request body (as performed by Validate JSON). + +## Recommended Migration Strategy + +For a safe migration approach, we recommend following these steps: +1. **Back Up Your APIs**: Perform a full [back-up](/developer-support/upgrading#backup-apis-and-policies) of your API Definitions - remember that the final migration is destructive, as the Tyk OAS APIs will inherit the database and API IDs of the originals +2. **Start with Dry Run**: Use the `dryRun` mode to validate the migration before making changes +3. **Stage Critical APIs**: For important APIs, use the `stage` mode to create test versions +4. **Test Thoroughly**: Verify all functionality in the staged APIs +5. 
**Promote Gradually**: Once testing is complete, use the `promote` mode to complete the migration of original APIs in batches +6. **Monitor Performance**: After migration, closely monitor the performance of migrated APIs diff --git a/api-management/multiple-environments.mdx b/api-management/multiple-environments.mdx new file mode 100644 index 000000000..8fffaab2a --- /dev/null +++ b/api-management/multiple-environments.mdx @@ -0,0 +1,401 @@ +--- +title: "Manage Multiple Environments" +description: "How to Manage Multiple Environments" +keywords: "TLS, SSL, Security, Certificate, Pinning" +sidebarTitle: "Manage Environments" +--- + +## Introduction + +It is possible with the Multi-Cloud and the Self-Managed version of Tyk to manage multiple environments across data centers. This can be very useful if you have QA, UAT and Production environments that are physically or geographically separate and you want to move API configurations between environments seamlessly. + +## What is API Sharding ? + +It is possible to use tags in various Tyk objects to change the behavior of a Tyk cluster or to modify the data that is sent to the analytics engine. Tags are free-form strings that can be embedded in Gateway configurations, API definitions, Policies and Individual Keys. + +Tags are used in two ways: To segment a cluster into various "zones" of API management, and secondly, to push more data into the analytics records to make reporting and tracking easier. + +### API Sharding + +API Sharding is what we are calling our approach to segmenting a Tyk cluster (or data centers) into different zones. An example of this in action would be to imagine you have separate VPCs that deal with different classes of services, lets say: Health, Banking and Pharma. + +You don't need the nodes that handle all the traffic for your Pharma APIs to load up the definitions for the other zones' services, this could allow someone to send unexpected traffic through (it may not go anywhere). 
+ +Alternatively, you could use segmentation to have separate API definitions for multiple data centers. In this way you could shard your API definitions across those DC's and not worry about having to reconfigure them if there is a failover event. + +### Using Sharding to handle API life-cycle with multiple data centers + +You can use sharding to very quickly publish an API from a `development` system to `staging` or `live`, simply by changing the tags that are applied to an API definition. + +With Tyk Community Edition and Tyk Pro, these clusters must all share the same Redis DB. + +If you are an Enterprise user, then you can go a step further and use the [Tyk Multi Data Center Bridge](/api-management/mdcb#managing-geographically-distributed-gateways-to-minimize-latency-and-protect-data-sovereignty) to have full multi-DC, multi-zone cluster segmentation, and manage APIs in different segments across different database back-ends. + +### Analytics and Reporting + +In order to use tags in analytics, there are two places where you can add a `"tags":[]` section: a Policy Definition, and a Session object for a token. + +Policy tags completely replace key tags, these tags are then fed into the analytics system and can be filtered in the dashboard. + +### Node Tags + +If your API is segmented, node tags will be appended to the analytics data, this will allow you to filter out all traffic going through a specific node or node cluster. + + + +If you set `use_db_app_options.node_is_segmented` to `true` for multiple gateway nodes, you should ensure that `management_node` is set to `false`. This is to ensure visibility for the management node across all APIs. + + + + +`management_node` is available from v2.3.4 and onwards. + +See [Tyk Gateway Configuration Options](/tyk-oss-gateway/configuration) for more details on node tags. 
+ +## Move APIs Between Environments + +It is possible to move APIs between Tyk environments in the following ways: + +### In Shared Dashboard Environments + +If the environments are both Self-Managed installations and are sharing a Tyk Dashboard (and optionally an MDCB instance) then you can use API and Gateway tagging to transparently and effortlessly move an API from one environment to another. + +See [API Tagging](/api-management/multiple-environments#api-tagging-with-on-premises) for more details. + +#### API Sharding + +You can also use [API Sharding](/api-management/multiple-environments#what-is-api-sharding-) to move APIs in a Shards (and or MDCB) Tyk Self-Managed installation. + +### In Separate Dashboard Environments + +If the API dashboards are separate and you wish to migrate API Definitions between two completely segregated environments (e.g. migrating to new hardware or a new DC), then you can use the Export functionality of the Dashboard to download the API definition as JSON and import it into your new installation. + +#### Steps for Configuration: + +1. **Select Your API** + + From the **API Designer**, select your API: + + API designer + +2. **Export the API** + + Click **EXPORT**: + + Export button location + +3. **Save the API** + + Save and rename the JSON file: + +4. **Import into your New Environment** + + In your new environment, click **IMPORT API**: + + Select import + +5. **Generate the new API** + + Select the **From Tyk Definition** tab and paste the contents of the JSON file into the code editor and click **GENERATE API**: + + Generate API + + This will now import the API Definition into your new environment, if you have kept the API ID in the JSON document as is, the ID will remain the same. + + + + + The ID you use in with any Dashboard API integrations will change as the documents physical ID will have changed with the import. 
+ + + +### Use Tyk-Sync + +You can also use our new Tyk-Sync tool which allows you to sync your APIs (and Policies) with a Version Control System (VCS). You can then move your APIs between environments. See [Tyk-Sync](/api-management/automations/sync) for more details. + +## Move Keys Between Environments + +Tyk currently does not have a facility to export a group of keys from one environment and reissue them in another and still be able to manage those keys from within the Dashboard. + +However, it is possible to temporarily allow access to existing keys in a new environment, but it should be noted that these keys should eventually be expired and re-generated within the new environment. + +### Moving Keys Between Environments / Creating Custom Keys + +In order to use a legacy key in a new environment, simply extract the key from the old environment using the Tyk REST APIs and then create them in the new environment using the custom key creation API. + +To create a key with a custom identifier, ie Token, simply use the [Gateway (OSS)](/tyk-gateway-api) or [Dashboard (Pro)](/api-management/dashboard-configuration#create-a-custom-key) REST APIs to import a custom key. + +## Move Policies Between Environments + +Moving policies between two (Dashboard) environments is not as easy as moving API definitions and requires working with the Dashboard API to first retrieve the policies, and then modifying the document to reinsert them in your new environment: + +### Preparation + +First you must set up your new environment to respect explicit policy IDs. To do so, edit the `tyk.conf` and `tyk_analytics.conf` files in your new environment and set the `policies. allow_explicit_policy_id` setting to `true` (the setting is just `allow_explicit_policy_id` at the root level of the Dashboard configuration). In order to retain your `api_id` when moving between environments then set `enable_duplicate_slugs` to `true` in your target `tyk_analytics.conf`. 
+ +### Steps for Configuration + +1. **Get your Policy** + + ```{.copyWrapper} + curl -X GET -H "authorization: {YOUR TOKEN}" \ + -s \ + -H "Content-Type: application/json" \ + https://admin.cloud.tyk.io/api/portal/policies/{POLICY-ID} | python -mjson.tool > policy.json + ``` + +2. **Edit the file we just created** + + The original file will look something like this, notice the two ID fields: + + ```{.json} + { + "_id": "5777ecdb0a91ff0001000003", + "access_rights": { + "xxxxx": { + "allowed_urls": [], + "api_id": "xxxxx", + "api_name": "Test", + "versions": [ + "Default" + ] + } + }, + "active": true, + "date_created": "0001-01-01T00:00:00Z", + "hmac_enabled": false, + "id": "", + "is_inactive": false, + "key_expires_in": 0, + "name": "Test Policy", + "org_id": "xxxxx", + "partitions": { + "acl": false, + "quota": false, + "rate_limit": false + }, + "per": 60, + "quota_max": -1, + "quota_renewal_rate": 60, + "rate": 1000, + "tags": [] + } + ``` + +3. **Move the id field value** + + Remove the `_id` field and put the value of the `_id` field into the `id` field, so `policy.json` should look like this: + + ```{.json} + { + "access_rights": { + "xxxxx": { + "allowed_urls": [], + "api_id": "xxxxx", + "api_name": "Test", + "versions": [ + "Default" + ] + } + }, + "active": true, + "date_created": "0001-01-01T00:00:00Z", + "hmac_enabled": false, + "id": "5777ecdb0a91ff0001000003", <------ NEW ID FIELD + "is_inactive": false, + "key_expires_in": 0, + "name": "Test Policy", + "org_id": "xxxxx", + "partitions": { + "acl": false, + "quota": false, + "rate_limit": false + }, + "per": 60, + "quota_max": -1, + "quota_renewal_rate": 60, + "rate": 1000, + "tags": [] + } + ``` + +4. 
**Update the policy via the API** + + Save the new `policies.json` file and then let's POST it back to the new environment: + + ```{.copyWrapper} + curl -X POST -H "authorization: {API-TOKEN}" \ + -s \ + -H "Content-Type: application/json" \ + -d @policies.json \ + https://{YOUR-NEW-ENV}/api/portal/policies | python -mjson.tool + ``` + +That's it, Tyk will now load this policy, and you will be able to manage and edit it the same way in your new environment, if you are re-creating tokens in your new environment, then those tokens' ACL does not need to be changed to a new policy ID since the legacy one will always be used as the reference for the policy. + +#### Policy IDs in the Dashboard + +After migrating a Policy from one environment to another, it is important to note that the **displayed** Policy ID is not going to match. **That is okay**. It happens because Tyk Dashboard displays the [`Mongo ObjectId`](https://docs.mongodb.com/manual/reference/glossary/#term-id), which is the `_id` field, but the `id` is the important part. + +**For example:** + +Policies in source environment +Policy ID Before + +Policies in target environment after migration +Policy ID After + +Notice that the IDs appear to be different. These are the BSON IDs and are expected to be different. But if we look for the underlying GUID `id`, you can see it's been mapped properly in the target environment. + +``` +$ curl dash-host-source/api/portal/policies/ + + .... + "_id": "5eb1b133e7644400013e54ec", + "id": "", + "name": "credit score", + +$ curl dash-host-target/api/portal/policies/ + + .... + "_id": "5f03be2ce043fe000177b047", + "id": "5eb1b133e7644400013e54ec", + "name": "credit score", +``` + +As you can see, under the hood, the policy has been migrated correctly with target Tyk Dashboard saving the proper ID inside `id`. That is the value that will be referred to inside Key Creation, etc. 
+ +### Use Tyk-Sync + +You can also use our new Tyk-Sync tool which allows you to sync your Policies (and APIs) with a Version Control System (VCS). You can then move your Policies between environments. See [Tyk-Sync](/api-management/automations/sync) for more details. + +## Gateway Sharding + +With Tyk, it is easy to enable a sharded configuration, you can deploy Gateways which selectively load APIs. This unlocks abilities to run Gateways in multiple zones, all connected to the same Control Plane. This allows for GDPR deployments, development/test Gateways, or even DMZ/NON-DMZ Gateways. + +Couple this functionality with the Tyk [Multi Data Center Bridge](/api-management/mdcb#managing-geographically-distributed-gateways-to-minimize-latency-and-protect-data-sovereignty) to achieve a global, multi-cloud deployment. + +### Configure a Gateway as a shard + +Setting up a Gateway to be a shard, or a zone, is very easy. All you do is tell the node in the tyk.conf file what tags to respect and that it is segmented: + +```{.copyWrapper} +... +"db_app_conf_options": { + "node_is_segmented": true, + "tags": ["qa", "uat"] +}, + ... +``` + +Tags are always treated as OR conditions, so this node will pick up all APIs that are marked as `qa` or `uat`. + +### Tag an API for a shard using the Dashboard + +From the API Designer, select the **Advanced Options** tab: + +Advanced options tab + +Scroll down to the **Segment Tags** options: + +Segment tags section + +Set the tag name you want to apply, and click **Add**. + +When you save the API, the tags will become immediately active. If any Gateways are configured to only load tagged API Definitions then this configuration will only be loaded by the relevant Gateway. 
+ +### Tag an API for a shard using Tyk Operator + +Add the tag names to the tags mapping field within an API Definition as shown in the example below: + +```yaml {linenos=table,hl_lines=["8-9"],linenostart=1} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + tags: + - edge + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +## Tyk Self Managed + +### Gateway & API Sharding +Tyk Gateway has a very powerful functionality that allows you to selectively choose which APIs are to be loaded on which Gateways. + +Imagine the case where you have two sets of APIs, Internal & External. You want to prevent your Internal APIs from being accessed or visible outside your protected network. Well, [sharding](/api-management/multiple-environments#what-is-api-sharding-) makes it extremely easy to configure your Tyk Gateways from the Dashboard. + +**Steps for Configuration:** + +1. **Configure a Gateway as a shard** + + Setting up a gateway to be a shard, or a zone, is very easy. All you do is tell the node in the tyk.conf file what tags to respect and that it is segmented: + + ```{.copyWrapper} + ... + "db_app_conf_options": { + "node_is_segmented": true, + "tags": ["private-gw", "edge"] + }, + ... + ``` + + Tags are always treated as OR conditions, so this node will pick up all APIs that are marked as `private-gw` or `edge`. + + + +In order to expose more details about the Gateway to the Dashboard, you can now configure the [edge_endpoints](/tyk-dashboard/configuration#edge_endpoints) section in the tyk-analytics.conf, and the Dashboard UI will pick that up and present you a list of Gateways you can chose from when creating an API. + + + +2. 
**Tag an API for a shard using the Dashboard** + + To add an API Tag to a an API configuration in the Dashboard, Select Edit from your API options, and select the *Advanced Options* tab: + + Advanced options tab location + + Then scroll down to the *Segment Tags* section: + + Segement tags section + + In this section, set the tag name you want to apply, and click *Add*. + + When you save the API, the tags will become immediately active, and if any Gateways are configured to only load tagged API Definitions then this configuration will only be loaded by the relevant Gateway. + +### Exposed Gateway tags to Dashboard UI + +From version 3.2.2 of the Tyk Dashboard, if [edge_endpoints](/tyk-dashboard/configuration#edge_endpoints) are being configured in tyk-analytics.conf, your Dashboard will automatically pick that list up for you, and display it in the UI when you create your API. + +List of available Gateways + +Once you select one or more Gateways, the *Segment Tags* section will be automatically prefilled with the tag values from the `edge_endpoints` configuration. + +List of segment tags + +Also, for every Gateway selected, there will be an API URL presented at the top of the page, within the *Core Settings* tab. + +List of API URLs + +### Target an API Definition via JSON + +In your API definition, add a tags section to the root of the API Definition: + +```{.copyWrapper} +"tags": ["private-gw"] +``` + +This will also set the tags for the API and when API requests are made through this Gateway, these tags will be transferred in to the analytics data set. + +### API Tagging with On-Premises + +API Sharding with Self-Managed is very flexible, but it behaves a little differently to sharding with Tyk Cloud Hybrid & Tyk Global Self-Managed deployments. The key difference is that with the latter, you can have federated Gateway deployments with **their own redis databases**. 
However with Tyk Self-Managed the zoning is limited to tags only, and must share a single Redis database. + +To isolate Self-Managed Gateway installations across data centers you will need to use Tyk Multi Data Center Bridge component. This system powers the functionality of Tyk Cloud & Tyk Cloud Hybrid in our cloud and is available to our enterprise customers as an add-on. diff --git a/api-management/performance-monitoring.mdx b/api-management/performance-monitoring.mdx new file mode 100644 index 000000000..cc737e617 --- /dev/null +++ b/api-management/performance-monitoring.mdx @@ -0,0 +1,111 @@ +--- +title: "Performance Monitoring" +description: "How to analyze Tyk Performance" +keywords: "Performance, Monitoring, Observability" +sidebarTitle: "Performance Monitoring" +--- + +## What is the performance impact of analytics + +Tyk Gateway allows analytics to be recorded and stored in a persistent data store (MongoDB/SQL) for all APIs by default, via [Tyk Pump](/api-management/tyk-pump#tyk-analytics-record-fields). + +Tyk Gateway generates transaction records for each API request and response, containing [analytics data](/api-management/tyk-pump#tyk-analytics-record-fields) relating to: the originating host (where the request is coming from), which Tyk API version was used, the HTTP method requested and request path etc. + +The transaction records are transmitted to Redis and subsequently transferred to a persistent [data store](/api-management/tyk-pump#external-data-stores) of your choice via Tyk Pump. Furthermore, Tyk Pump can also be configured to [aggregate](/api-management/logs-metrics#aggregated-analytics) the transaction records (using different data keys - API ID, access key, endpoint, response status code, location) and write to a persistent data store. Tyk Dashboard uses this data for: +- [Aggregated analytics](/api-management/dashboard-configuration#traffic-analytics) - Displaying analytics based on the aggregated data. 
+- [Log Browser](/api-management/dashboard-configuration#activity-logs) to display raw transaction records. + +### How Do Analytics Impact Performance? + +Analytics may introduce the problem of increased CPU load and a decrease in the number of requests per second (RPS). + +In the *Tyk Dashboard API* screen below, there are two APIs, *track* and *notrack*. The APIs were created to conduct a simple load test, to show the gateway's RPS (requests per second) for each API: + +- **track**: Traffic to this API is tracked, i.e. transaction records are generated for each request/response. +- **notrack**: Traffic to this API is not tracked, i.e. transaction records are not generated for each request/response. + +apis measured in Tyk Dashboard + +100,000 requests were sent to each API and the rate at which Tyk was able to handle those requests (number of requests per second) was measured. The results for the *tracked* API are displayed in the left pane terminal window; with the right pane showing the results for the *untracked* API. + +### Tracked API Performance + +measuring tracked API performance impact + +### Untracked API Performance + +measuring do_not_track API performance impact + +### Explaining the results + +We can see that **19,253.75** RPS was recorded for the *untracked* API; with **16,743.6011** RPS reported for the *tracked* API. The number of requests per second decreased by **~13%** when analytics was enabled. + +### What Can Be Done To Address This Performance Impact? + +Tyk is configurable, allowing fine grained control over which information should be recorded and which can be skipped, thus reducing CPU cycles, traffic and storage. + +Users can selectively prevent the generation of analytics for +[do_not_track](/api-management/traffic-transformation/do-not-track) middleware: +- **Per API**: Tyk Gateway will not create records for requests/responses for any endpoints of an API. 
+- **Per Endpoint**: Tyk Gateway will not create records for requests/responses for specific endpoints. + +When set, this prevents Tyk Gateway from generating the transaction records. Without transaction records, Tyk Pump will not transfer analytics to the chosen persistent data store. It's worth noting that the [track middleware](/api-management/dashboard-configuration#activity-by-endpoint) exclusively influences the generation of *endpoint popularity* aggregated data by Tyk Pump. + +### Conclusion + +[Disabling](/api-management/traffic-transformation/do-not-track) the creation of analytics (either per API or for specific endpoints) helps to reduce CPU cycles and network requests for systems that exhibit high load and traffic, e.g. social media platforms, streaming, financial services and trading platforms. + +Application decisions need to be made concerning which endpoints are non critical and can thus have analytics disabled. Furthermore, benchmarking and testing will be required to evaluate the actual benefits for the application specific use case. + +Subsequently, it is worthwhile monitoring traffic and system load and using this feature to improve performance. + +## How to reduce CPU usage in a Redis Cluster + +### What does high CPU usage in a Redis node within a Redis Cluster mean ? + +When a single Redis node within a Redis Cluster exhibits high CPU usage, it indicates that the CPU resources of that particular node are being heavily utilized compared to others in the cluster. + +The illustration below highlights the scenario where a single Redis node is exhibiting high CPU usage of 1.20% within a Redis Cluster. + +analytics keys stored in one Redis server + +### What could be causing this high CPU usage ? + +One possible reason for high CPU usage in a single Redis node within a Redis Cluster is that analytics features are enabled and keys are being stored within that specific Redis node. 
+ +### How does storing keys within a single Redis server contribute to high CPU usage ? + +A high volume of analytics traffic can decrease performance, since all analytics keys are stored within one Redis server. Storing keys within a single Redis server can lead to increased CPU usage because all operations related to those keys, such as retrieval, updates and analytics processing, are concentrated on that server. This can result in heavier computational loads on that particular node. This leads to high CPU usage. + +### What can be done to address high CPU usage in this scenario ? + +Consider distributing the analytics keys across multiple Redis nodes within the cluster. This can help distribute the computational load more evenly, reducing the strain on any single node and potentially alleviating the high CPU usage. + +In Redis, *key sharding* is a term used to describe the practice of distributing data across multiple Redis instances or *shards* based on the keys. This feature is provided by [Redis Cluster](https://redis.io/docs/management/scaling/) and provides horizontal scalability and improved performance. + +Tyk supports configuring this behavior so that analytics keys are distributed across multiple servers within a Redis cluster. The image below illustrates that CPU usage is reduced across two Redis servers after making this configuration change. + +analytics keys distributed across Redis servers + +### How do I configure Tyk to distribute analytics keys to multiple Redis shards ? + +Follow these steps: + +1. **Check that your Redis Cluster is correctly configured** + + Confirm that the `enable_cluster` configuration option is set to true in the [Tyk Gateway](/tyk-oss-gateway/configuration#storageenable_cluster), [Tyk Dashboard](/tyk-dashboard/configuration#enable_cluster) and [Tyk Pump](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#analytics_storage_configenable_cluster) configuration files. 
This setting + informs Tyk that a Redis Cluster is in use for key storage. + + Ensure that the `addrs` array is populated in the [Tyk Gateway](/tyk-oss-gateway/configuration#storageaddrs) and [Tyk Pump](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#analytics_storage_configaddrs) configuration files (*tyk.conf* and *pump.conf*) with the addresses of all Redis Cluster nodes. If you are using Tyk Self Managed (the licensed product), also update [Tyk Dashboard](/tyk-dashboard/configuration#redis_addrs) configuration file (*tyk_analytics.conf*). This ensures that the Tyk components can interact with the entire Redis Cluster. Please refer to the [configure Redis Cluster](/tyk-configuration-reference/redis-cluster-sentinel#redis-cluster-and-tyk-gateway) guide for further details. + +2. **Configure Tyk to distribute analytics keys to multiple Redis shards** + + To distribute analytics keys across multiple Redis shards effectively you need to configure the Tyk components to leverage the Redis cluster's sharding capabilities: + + 1. **Optimize Analytics Configuration**: In the Tyk Gateway configuration (tyk.conf), set [analytics_config.enable_multiple_analytics_keys](/tyk-oss-gateway/configuration#analytics_configenable_multiple_analytics_keys) to true. This option allows Tyk to distribute analytics data across Redis nodes, using multiple keys for the analytics. There's a corresponding option for Self Managed MDCB, also named [enable_multiple_analytics_keys](/tyk-multi-data-centre/mdcb-configuration-options#enable_multiple_analytics_keys). Useful only if the gateways in the data plane are configured to send analytics to MDCB. + 2. 
**Optimize Connection Pool Settings**: Adjust the [optimization_max_idle](/tyk-oss-gateway/configuration#storageoptimisation_max_idle) and [optimization_max_active](/tyk-oss-gateway/configuration#storageoptimisation_max_active) settings in the configuration files to ensure that the connection pool can handle the analytics workload without overloading any Redis shard. + 3. **Use a Separate Analytics Store**: For high analytics traffic, you can opt to use a dedicated *Redis Cluster* for analytics by setting [enable_separate_analytics_store](/tyk-oss-gateway/configuration#enable_separate_analytics_store) to true in the Tyk Gateway configuration file (*tyk.conf*) and specifying the separate Redis cluster configuration in the `analytics_storage` section. Please consult the [separated analytics storage](/api-management/tyk-pump#separated-analytics-storage) guide for an example with *Tyk Pump* that can equally be applied to *Tyk Gateway*. + 4. **Review and Test**: After implementing these changes, thoroughly review your configurations and conduct load testing to verify that the analytics traffic is now evenly distributed across all Redis shards. + + By following these steps you can enhance the distribution of analytics traffic across the Redis shards. This should lead to improved scalability and performance of your Tyk deployment. + diff --git a/api-management/plugins/advance-config.mdx b/api-management/plugins/advance-config.mdx new file mode 100644 index 000000000..a59a7c34c --- /dev/null +++ b/api-management/plugins/advance-config.mdx @@ -0,0 +1,625 @@ +--- +title: "Custom Plugins Advance Configuration" +description: "Explore advanced configuration options for custom plugins in Tyk, including CICD automation, OpenTelemetry instrumentation, and gRPC server health checks in Kubernetes." +sidebarTitle: "Advance Configuration" +--- + +## CICD - Automating Your Plugin Builds + +It's very important to automate the deployment of your infrastructure. 
+ +Ideally, you store your configurations and code in version control, and then through a trigger, have the ability to deploy everything automatically into higher environments. + +With custom plugins, this is no different. + +To illustrate this, we can look at the GitHub Actions of the [example repo][0]. + +We see that upon every pull request, a section of steps are taken to "Build, [Bundle](/api-management/plugins/overview#plugin-bundles), Release Go Plugin". + +Let's break down the [workflow file][1]: + + +### Compiling the Plugin + +We can see the first few steps replicate our first task, bootstrapping the environment and compiling the plugin into a binary format. + +```make + steps: + - uses: actions/checkout@v3 + + - name: Copy Env Files + run: cp tyk/confs/tyk_analytics.env.example tyk/confs/tyk_analytics.env + + - name: Build Go Plugin + run: make go-build +``` + +We can look at the [Makefile][2] to further break down the last `go-build` command. + +### Bundle The Plugin + +The next step of the workflow is to "[bundle](/api-management/plugins/overview#plugin-bundles)" the plugin. + +``` +- name: Bundle Go Plugin + run: docker-compose run --rm --user=1000 --entrypoint "bundle/bundle-entrypoint.sh" tyk-gateway +``` + +This command generates a "bundle" from the sample Go plugin in the repo. + + + +For added security, please consider signing your [bundles](/api-management/plugins/overview#plugin-deployment-types), especially if the connection between the Gateways and the Bundler server traverses the internet. + + + + +Custom plugins can be "bundled", (zipped/compressed) into a standard format, and then uploaded to some server so that they can be downloaded by the Gateways in real time. + +This process allows us to decouple the building of our custom plugins from the runtime of the Gateways. + +In other words, Gateways can be scaled up and down, and pointed at different plugin repos very easily. 
This makes it easier to deploy Custom plugins especially in containerized environments such as Kubernetes, where we don't have to worry about persistent volumes. + +You can read more about plugin bundles [here][3]. + +### Deploy The Plugin + +Next step of the workflow is to publish our bundle to a server that's reachable by the Gateways. + +```make +- name: Upload Bundle + uses: actions/upload-artifact@v3 + with: + name: customgoplugin.zip + path: tyk/bundle/bundle.zip + + - uses: jakejarvis/s3-sync-action@master + with: + args: --acl public-read --follow-symlinks + env: + AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_REGION: 'us-east-1' + SOURCE_DIR: 'tyk/bundle' +``` + +This step uploads the Bundle to both GitHub and an AWS S3 bucket. Obviously, your workflow will look slightly different here. + + + +For seamless deployments, take a look at multi-version [plugin support](/tyk-cloud/using-plugins) to enable zero downtime deployments of your Tyk Gateway installs + + + +### Configure the Gateway + +In order to instruct the Gateway to download the bundle, we need two things: + +1. The root server - The server which all bundles will be downloaded from. This is set globally in the Tyk conf file [here](/tyk-oss-gateway/configuration#enable_bundle_downloader). + +2. The name of the bundle - this is generated during your workflow usually. This is defined at the API level (this is where you declare Custom plugins, as evident in task 2) + +The field of the API Definition that needs to be set is `custom_middleware_bundle`. + +### Summary + +That's it! + +We've set up our dev environment, built, compiled, a Custom Go plugin, loaded onto a Tyk Gateway, and tested it by sending an API request. Finally, we've talked about deploying a Bundle in a production grade set up. + +Have a look at our [examples repo][4] for more inspiration. 
+ +[0]: https://github.com/TykTechnologies/custom-go-plugin/actions +[1]: https://github.com/TykTechnologies/custom-go-plugin/blob/master/.github/workflows/makefile.yml +[2]: https://github.com/TykTechnologies/custom-go-plugin/blob/master/Makefile#L59 +[3]: https://github.com/TykTechnologies/custom-go-plugin#deploying-the-go-plugin +[4]: https://github.com/TykTechnologies/custom-plugin-examples + +## Instrumenting Plugins with OpenTelemetry + +By instrumenting your custom plugins with Tyk's *OpenTelemetry* library, you can gain additional insights into custom plugin behavior like time spent and exit status. Read on to see some examples of creating span and setting attributes for your custom plugins. + + + +**Note:** +Although this documentation is centered around Go plugins, the outlined principles are universally applicable to plugins written in other languages. Ensuring proper instrumentation and enabling detailed tracing will integrate the custom plugin span into the trace, regardless of the underlying programming language. + + + +### Prerequisites + +- Go v1.19 or higher +- Gateway instance with OpenTelemetry and DetailedTracing enabled: + +Add this field within your [Gateway config file](/tyk-oss-gateway/configuration): + +```json +{ + "opentelemetry": { + "enabled": true + } +} +``` + +And this field within your [API definition](/api-management/gateway-config-introduction): + +```json +{ + "detailed_tracing": true +} +``` + +You can find more information about enabling OpenTelemetry [here](/api-management/logs-metrics#opentelemetry). + + + +DetailedTracing must be enabled in the API definition to see the plugin spans in the traces. + + + +In order to instrument our plugins we will be using Tyk's OpenTelemetry library implementation. +You can import it by running the following command: + +```console +$ go get github.com/TykTechnologies/opentelemetry +``` + +
+ + + +In this case, we are using our own OpenTelemetry library for convenience. You can also use the [Official OpenTelemetry Go SDK](https://github.com/open-telemetry/opentelemetry-go) + + + +### Create a new span from the request context + +`trace.NewSpanFromContext()` is a function that helps you create a new span from the current request context. When called, it returns two values: a fresh context with the newly created span embedded inside it, and the span itself. This method is particularly useful for tracing the execution of a piece of code within a web request, allowing you to measure and analyze its performance over time. + +The function takes three parameters: + +1. `Context`: This is usually the current request's context. However, you can also derive a new context from it, complete with timeouts and cancelations, to suit your specific needs. +2. `TracerName`: This is the identifier of the tracer that will be used to create the span. If you do not provide a name, the function will default to using the `tyk` tracer. +3. `SpanName`: This parameter is used to set an initial name for the child span that is created. This name can be helpful for later identifying and referencing the span. + +Here's an example of how you can use this function to create a new span from the current request context: + +```go +package main +import ( + "net/http" + "github.com/TykTechnologies/opentelemetry/trace" +) +// AddFooBarHeader adds a custom header to the request. +func AddFooBarHeader(rw http.ResponseWriter, r *http.Request) { + // We create a new span using the context from the incoming request. + _, newSpan := trace.NewSpanFromContext(r.Context(), "", "GoPlugin_first-span") + // Ensure that the span is properly ended when the function completes. + defer newSpan.End() + // Add a custom "Foo: Bar" header to the request. 
+ r.Header.Add("Foo", "Bar") +} +func main() {} +``` + +In your exporter (in this case, Jaeger) you should see something like this: + +OTel Span from context + +As you can see, the name we set is present: `GoPlugin_first-span` and it's the first child of the `GoPluginMiddleware` span. + +### Modifying span name and set status + +The span created using `trace.NewSpanFromContext()` can be further configured after its creation. You can modify its name and set its status: + +```go +func AddFooBarHeader(rw http.ResponseWriter, r *http.Request) { + _, newSpan := trace.NewSpanFromContext(r.Context(), "", "GoPlugin_first-span") + defer newSpan.End() + + // Set a new name for the span. + newSpan.SetName("AddFooBarHeader Testing") + + // Set the status of the span. + newSpan.SetStatus(trace.SPAN_STATUS_OK, "") + + r.Header.Add("Foo", "Bar") +} +``` + +This updated span will then appear in the traces as `AddFooBarHeader Testing` with an **OK Status**. + +The second parameter of the `SetStatus` method can accept a description parameter that is valid for **ERROR** statuses. + +The available span statuses in ascending hierarchical order are: + +- `SPAN_STATUS_UNSET` + +- `SPAN_STATUS_ERROR` + +- `SPAN_STATUS_OK` + +This order is important: a span with an **OK** status cannot be overridden with an **ERROR** status. However, the reverse is possible - a span initially marked as **UNSET** or **ERROR** can later be updated to **OK**. + +OTel Span name and status + +Now we can see the new name and the `otel.status_code` tag with the **OK** status. + +### Setting attributes + +The `SetAttributes()` function allows you to set attributes on your spans, enriching each trace with additional, context-specific information. 
+ +The following example illustrates this functionality using the OpenTelemetry library's implementation by Tyk + +```go +func AddFooBarHeader(rw http.ResponseWriter, r *http.Request) { + _, newSpan := trace.NewSpanFromContext(r.Context(), "", "GoPlugin_first-span") + defer newSpan.End() + + // Set an attribute on the span. + newSpan.SetAttributes(trace.NewAttribute("go_plugin", "1")) + + r.Header.Add("Foo", "Bar") +} +``` + +In the above code snippet, we set an attribute `go_plugin` with a value of `1` on the span. This is just a demonstration; in practice, you might want to set attributes that carry meaningful data relevant to your tracing needs. + +Attributes are key-value pairs. The value isn't restricted to string data types; it can be any value, including numerical, boolean, or even complex data types, depending on your requirements. This provides flexibility and allows you to include rich, structured data within your spans. + +The illustration below, shows how the `go_plugin` attribute looks in Jaeger: + +OTel Span attributes + +### Multiple functions = Multiple spans + +To effectively trace the execution of your plugin, you can create additional spans for each function execution. By using context propagation, you can link these spans, creating a detailed trace that covers multiple function calls. This allows you to better understand the sequence of operations, pinpoint performance bottlenecks, and analyze application behavior. + +Here's how you can implement it: + +```go +func AddFooBarHeader(rw http.ResponseWriter, r *http.Request) { + // Start a new span for this function. + ctx, newSpan := trace.NewSpanFromContext(r.Context(), "", "GoPlugin_first-span") + defer newSpan.End() + + // Set an attribute on this span. + newSpan.SetAttributes(trace.NewAttribute("go_plugin", "1")) + + // Call another function, passing in the context (which includes the new span). + NewFunc(ctx) + + // Add a custom "Foo: Bar" header to the request. 
+ r.Header.Add("Foo", "Bar") +} + +func NewFunc(ctx context.Context) { + // Start a new span for this function, using the context passed from the calling function. + _, newSpan := trace.NewSpanFromContext(ctx, "", "GoPlugin_second-span") + defer newSpan.End() + + // Simulate some processing time. + time.Sleep(1 * time.Second) + + // Set an attribute on this span. + newSpan.SetAttributes(trace.NewAttribute("go_plugin", "2")) +} +``` + +In this example, the `AddFooBarHeader` function creates a span and then calls `NewFunc`, passing the updated context. The `NewFunc` function starts a new span of its own, linked to the original through the context. It also simulates some processing time by sleeping for 1 second, then sets a new attribute on the second span. In a real-world scenario, the `NewFunc` would contain actual code logic to be executed. + +The illustration below, shows how this new child looks in Jaeger: + +OTel Span attributes + +### Error handling + +In OpenTelemetry, it's essential to understand the distinction between recording an error and setting the span status to error. The `RecordError()` function records an error as an exception span event. However, this alone doesn't change the span's status to error. To mark the span as error, you need to make an additional call to the `SetStatus()` function. + +> RecordError will record err as an exception span event for this span. An additional call to SetStatus is required if the Status of the Span should be set to Error, as this method does not change the Span status. If this span is not being recorded or err is nil then this method does nothing. + +Here's an illustrative example with function calls generating a new span, setting attributes, setting an error status, and recording an error: + +```go +func AddFooBarHeader(rw http.ResponseWriter, r *http.Request) { + // Create a new span for this function. 
+ ctx, newSpan := trace.NewSpanFromContext(r.Context(), "", "GoPlugin_first-span") + defer newSpan.End() + + // Set an attribute on the new span. + newSpan.SetAttributes(trace.NewAttribute("go_plugin", "1")) + + // Call another function, passing in the updated context. + NewFunc(ctx) + + // Add a custom header "Foo: Bar" to the request. + r.Header.Add("Foo", "Bar") +} + +func NewFunc(ctx context.Context) { + // Create a new span using the context passed from the previous function. + ctx, newSpan := trace.NewSpanFromContext(ctx, "", "GoPlugin_second-span") + defer newSpan.End() + + // Simulate some processing time. + time.Sleep(1 * time.Second) + + // Set an attribute on the new span. + newSpan.SetAttributes(trace.NewAttribute("go_plugin", "2")) + + // Call a function that will record an error and set the span status to error. + NewFuncWithError(ctx) +} + +func NewFuncWithError(ctx context.Context) { + // Start a new span using the context passed from the previous function. + _, newSpan := trace.NewSpanFromContext(ctx, "", "GoPlugin_third-span") + defer newSpan.End() + + // Set status to error. + newSpan.SetStatus(trace.SPAN_STATUS_ERROR, "Error Description") + + // Set an attribute on the new span. + newSpan.SetAttributes(trace.NewAttribute("go_plugin", "3")) + + // Record an error in the span. + newSpan.RecordError(errors.New("this is an auto-generated error")) +} +``` + +In the above code, the `NewFuncWithError` function demonstrates error handling in OpenTelemetry. First, it creates a new span. Then it sets the status to error, and adds an attribute. Finally, it uses `RecordError()` to log an error event. This two-step process ensures that both the error event is recorded and the span status is set to reflect the error. 
+ +OTel Span error handling + + +## Highly Available gRPC Servers in Kubernetes + +When deploying gRPC servers to host [rich plugins](/api-management/plugins/rich-plugins#using-grpc) in Kubernetes, implementing proper health checks on those servers is crucial so that you can achieve high availability and enable seamless rolling updates. + +Kubernetes needs to know when your gRPC server is ready to accept traffic otherwise: + +- Traffic may be routed to pods that aren't ready +- Failed pods won't be automatically restarted +- Load balancing becomes unreliable +- Rolling deployments can cause service disruption + +When using gRPC plugins to implement custom processing of API requests, this is especially important since the Gateway depends on having reliable access to these services to process requests. If Tyk is unable to reach the gRPC server, then it will be unable to correctly execute the plugins hosted there, leading to API requests failing. + +### Key Benefits of Health Checks for Rolling Updates + +1. **Zero-Downtime Deployments** +- Readiness probes ensure new pods are fully ready before receiving traffic +- Old pods continue serving until new ones are ready +- Traffic is never routed to non-functional pods + +2. **Graceful Shutdown** +- SIGTERM triggers immediate graceful shutdown sequence +- Health status changes to "not serving" during shutdown +- gRPC server stops gracefully, finishing in-flight requests +- 30-second timeout ensures forced shutdown if graceful shutdown hangs + +3. **Failure Recovery** +- Liveness probes detect and restart unhealthy pods +- Startup probes give adequate time for slow-starting services +- Failed deployments are automatically rolled back + +### Using gRPC Health Probes with Tyk + +Tyk's native support for gRPC health probes is provided via the Health Checking service in the standard gRPC Go library and provides several benefits: + +1. **Native Integration**: Uses the same transport protocol as your main service +2. 
**Service-Specific Checks**: Can check the health of individual gRPC services +3. **Better Resource Utilization**: No need for a separate HTTP server +4. **Consistent Protocol**: Maintains gRPC throughout the stack + +#### gRPC Health Checking Protocol + +The following example uses the [gRPC Health Checking Protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md) instead of HTTP endpoints. + +#### Health Check Implementation Details + +The gRPC health server manages two service states: + +- `""` (empty string): Overall server health +- `"coprocess.Dispatcher"`: Specific service health for readiness checks + +Health states transition as follows: +- **Startup**: `NOT_SERVING` β†’ `SERVING` (when ready) +- **Shutdown**: `SERVING` β†’ `NOT_SERVING` (immediate) +- **Error**: `SERVING` β†’ `NOT_SERVING` (on failure) + +#### Example Implementation + +##### Required Dependencies + +You should add this to your `go.mod`: + +```go +require ( + google.golang.org/grpc v1.50.0 // or later +) +``` + +##### main.go Setup + +```go +package main + +import ( + "context" + "log" + "net" + "os" + "os/signal" + "sync/atomic" + "syscall" + "time" + + "github.com/TykTechnologies/tyk/coprocess" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + ListenAddress = ":50051" +) + +func main() { + // Track server readiness + var isReady int32 + + lis, err := net.Listen("tcp", ListenAddress) + if err != nil { + log.Fatalf("Failed to listen: %v", err) + } + + log.Printf("starting grpc server on %v", ListenAddress) + s := grpc.NewServer() + + // Register the main coprocess service + coprocess.RegisterDispatcherServer(s, &Dispatcher{}) + + // Register health service + healthServer := health.NewServer() + grpc_health_v1.RegisterHealthServer(s, healthServer) + + // Initially mark all services as not serving + healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + 
healthServer.SetServingStatus("coprocess.Dispatcher", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + + // Channel to listen for errors coming from the listener. + serverErrors := make(chan error, 1) + + // Start the service listening for requests. + go func() { + // Mark as ready once server is initialized + atomic.StoreInt32(&isReady, 1) + + // Set health status to serving + healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) + healthServer.SetServingStatus("coprocess.Dispatcher", grpc_health_v1.HealthCheckResponse_SERVING) + + log.Printf("gRPC server is ready to accept connections") + serverErrors <- s.Serve(lis) + }() + + // Channel to listen for an interrupt or terminate signal from the OS. + shutdown := make(chan os.Signal, 1) + signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM) + + // Blocking main and waiting for shutdown. + select { + case err := <-serverErrors: + atomic.StoreInt32(&isReady, 0) + // Mark services as not serving + healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + healthServer.SetServingStatus("coprocess.Dispatcher", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + log.Fatalf("Server error: %v", err) + case sig := <-shutdown: + atomic.StoreInt32(&isReady, 0) + log.Printf("Received signal: %v", sig) + + // Mark services as not serving during shutdown + healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + healthServer.SetServingStatus("coprocess.Dispatcher", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + + // Give outstanding requests 5 seconds to complete. 
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + stopped := make(chan struct{}) + go func() { + s.GracefulStop() + close(stopped) + }() + + select { + case <-ctx.Done(): + log.Printf("Graceful shutdown timed out") + s.Stop() + case <-stopped: + log.Printf("Graceful shutdown completed") + } + } +} + +``` + +##### Kubernetes Deployment + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tyk-grpc-coprocess + labels: + app: tyk-grpc-coprocess +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 + selector: + matchLabels: + app: tyk-grpc-coprocess + template: + metadata: + labels: + app: tyk-grpc-coprocess + spec: + containers: + - name: grpc-server + image: your-registry/tyk-grpc-coprocess:latest + ports: + - containerPort: 50051 + name: grpc + + ## Readiness probe - determines when pod is ready for traffic + readinessProbe: + grpc: + port: 50051 + service: coprocess.Dispatcher + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + + ## Liveness probe - determines when to restart pod + livenessProbe: + grpc: + port: 50051 + initialDelaySeconds: 15 + periodSeconds: 20 + timeoutSeconds: 5 + failureThreshold: 3 + + ## Startup probe - gives more time for initial startup + startupProbe: + grpc: + port: 50051 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 30 + +--- +apiVersion: v1 +kind: Service +metadata: + name: tyk-grpc-coprocess-service +spec: + selector: + app: tyk-grpc-coprocess + ports: + - name: grpc + port: 50051 + targetPort: 50051 + protocol: TCP + type: ClusterIP +``` + diff --git a/api-management/plugins/golang.mdx b/api-management/plugins/golang.mdx new file mode 100644 index 000000000..7b4e51eeb --- /dev/null +++ b/api-management/plugins/golang.mdx @@ -0,0 +1,1040 @@ +--- +title: "Golang Plugins" +description: "How to manage users, teams, permissions, 
rbac in Tyk Dashboard" +sidebarTitle: "Golang Plugins" +--- + +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; + +## Introduction + +Golang plugins are a very flexible and powerful way to extend the functionality of Tyk by attaching custom logic written in Go to [hooks](/api-management/plugins/plugin-types#plugin-types) in the Tyk [middleware chain](/api-management/traffic-transformation#request-middleware-chain). +The chain of middleware is specific to an API and gets created at API load time. When Tyk Gateway performs an API re-load it also loads any custom middleware and "injects" them into a chain to be called at different stages of the HTTP request life cycle. + +For a quick-start guide to working with Go plugins, start [here](/api-management/plugins/overview#getting-started). + +The [Go plugin writing guide](/api-management/plugins/golang#writing-custom-go-plugins) provides details of how to access dynamic data (such as the key session object) from your Go functions. Combining these resources provides you with a powerful set of tools for shaping and structuring inbound traffic to your API. + +## Supported plugin types + +All of Tyk's [custom middleware hooks](/api-management/plugins/plugin-types#plugin-types) support Go plugins. They represent different stages in the request and response [middleware chain](/api-management/traffic-transformation#request-middleware-chain) where custom functionality can be added. + +- **Pre** - supports an array of middleware that run before any others (i.e. before authentication) +- **Auth** - this middleware performs custom authentication and adds API key session info into the request context and can be used only if the API definition has both: + - `"use_keyless": false` + - `"use_go_plugin_auth": true` +- **Post-Auth** - supports an array of middleware to be run after authentication; at this point, we have authenticated the session API key for the given key (in the request context) so we can perform any extra checks. 
This can be used only if the API definition has both: + - `"use_keyless": false` + - an authentication method specified +- **Post** - supports an array of middleware that run at the very end of the middleware chain, just before Tyk makes a round-trip to the upstream target +- **Response** - run only at the point the response has returned from a service upstream of the API Gateway; note that the [method signature for Response Go plugins](/api-management/plugins/golang#creating-a-custom-response-plugin) is slightly different from the other hook types + + + + + The `use_keyless` and `use_go_plugin_auth` fields are populated automatically with the correct values if you add a plugin to the **Auth** or **Post-Auth** hooks when using the Tyk Dashboard. + + +## Custom Go plugin development flow + +Go Plugins need to be compiled to native shared object code, which can then be loaded by Tyk Gateway. + +We recommend that you familiarize yourself with the following official Go documentation to help you work effectively with Go plugins: + +- [The official plugin package documentation - Warnings](https://pkg.go.dev/plugin) +- [Tutorial: Getting started with multi-module workspaces](https://go.dev/doc/tutorial/workspaces) + + + + + Plugins are currently supported only on Linux, FreeBSD, and macOS, making them unsuitable for applications intended to be portable. + + + +### Tyk Plugin Compiler + +We provide the [Tyk Plugin Compiler](/api-management/plugins/golang#plugin-compiler) docker image, which we **strongly recommend** is used to build plugins compatible with the official Gateway releases. 
That tool provides the cross compilation toolchain, Go version used to build the release, ensures that compatible flags are used when compiling plugins (such as `-trimpath`, `CC`, `CGO_ENABLED`, `GOOS`, `GOARCH`) and also works around known Go issues such as: + +- https://github.com/golang/go/issues/19004 +- https://www.reddit.com/r/golang/comments/qxghjv/plugin_already_loaded_when_a_plugin_is_loaded/ + +#### Understanding Plugin Compiler Security Scans + +The Tyk Plugin Compiler Docker image is a development tool used exclusively during the build phase to compile custom Go plugins for the Tyk Gateway. This tool: + +1. **Is not a runtime component** - It is never deployed as part of your production Tyk environment +2. **Operates in isolated build environments** - It should only be used in controlled development or CI/CD pipelines +3. **Is not designed to be network-exposed** - It should never be deployed as a service or exposed to untrusted networks + +##### Technical Context +The Plugin Compiler is built on Debian Bullseye to ensure binary compatibility with RHEL8 environments, which are commonly used in enterprise Tyk deployments. Security scanning tools may flag numerous Common Vulnerabilities and Exposures (CVEs) in the base Debian Bullseye libraries included in this image. + +##### Security Clarification +These CVEs do not represent an exploitable attack surface in your Tyk deployment for several reasons: + +1. **Build-time vs. Runtime Separation:** The Plugin Compiler is strictly a build-time tool. The compiled plugins that it produces are what get deployed to your Tyk Gateway, not the compiler itself. + +2. **Ephemeral Usage Pattern**: The recommended usage pattern is to run the compiler only when needed to generate plugin binaries, then discard the container. + +3. **Air-gapped Operation**: The compilation process typically occurs in development environments or CI/CD pipelines that are separate from production systems. + +4. 
**No Persistent Deployment**: Unlike the Tyk Gateway, Dashboard, and other runtime components, the Plugin Compiler is never deployed as a long-running service in your API management infrastructure. + +For optimal security, we recommend running the Plugin Compiler in isolated build environments and transferring only the compiled plugin binaries to your production Tyk deployment. + + +### Setting up your environment + +It's important to understand the need for plugins to be compiled using exactly the same environment and build flags as the Gateway. To simplify this and minimise the risk of compatibility problems, we recommend the use of [Go workspaces](https://go.dev/blog/get-familiar-with-workspaces), to provide a consistent environment. + +To develop plugins without using the Tyk Plugin Compiler, you'll need: + +- Go (matching the version used in the Gateway, which you can determine using `go.mod`). +- Git to check out Tyk Gateway source code. +- A folder with the code that you want to build into plugins. + +We recommend that you set up a *Go workspace*, which, at the end, is going to contain: + +- `/tyk-release-x.y.z` - the Tyk Gateway source code +- `/plugins` - the plugins +- `/go.work` - the *Go workspace* file +- `/go.work.sum` - *Go workspace* package checksums + +Using the *Go workspace* ensures build compatibility between the plugins and Gateway. + +### Steps for Configuration: + +1. **Checking out Tyk Gateway source code** + + ``` + git clone --branch release-5.3.6 https://github.com/TykTechnologies/tyk.git tyk-release-5.3.6 || true + ``` + + This example uses a particular `release-5.3.6` branch, to match Tyk Gateway release 5.3.6. With newer `git` versions, you may pass `--branch v5.3.6` and it would use the tag. In case you want to use the tag it's also possible to navigate into the folder and issue `git checkout tags/v5.3.6`. + +2. **Preparing the Go workspace** + + Your Go workspace can be very simple: + + 1. 
Create a `.go` file containing the code for your plugin.
+    2. Create a `go.mod` file for the plugin.
+    3. Ensure the correct Go version is in use.
+
+    As an example, we can use the [CustomGoPlugin.go](https://github.com/TykTechnologies/custom-go-plugin/blob/master/go/src/CustomGoPlugin.go) sample as the source for our plugin as shown:
+
+    ```
+    mkdir -p plugins
+    cd plugins
+    go mod init testplugin
+    go mod edit -go $(go mod edit -json go.mod | jq -r .Go)
+    wget -q https://raw.githubusercontent.com/TykTechnologies/custom-go-plugin/refs/heads/master/go/src/CustomGoPlugin.go
+    cd -
+    ```
+
+    The following snippet provides you with a way to get the exact Go version used by Gateway from its [go.mod](https://github.com/TykTechnologies/tyk/blob/release-5.3.6/go.mod#L3) file:
+
+    - `go mod edit -json go.mod | jq -r .Go` (e.g. `1.22.7`)
+
+    This should be used to ensure the version matches between gateway and the plugin.
+
+    To summarize what was done:
+
+    1. We created a plugins folder and initialized a `go` project using the `go mod` command.
+    2. Set the Go version of `go.mod` to match the one set in the Gateway.
+    3. Initialized the project with sample plugin `go` code.
+
+    At this point, we don't have a *Go workspace* but we will create one next so that we can effectively share the Gateway dependency across Go modules.
+
+3. **Creating the Go workspace**
+
+    To set up the Go workspace, start in the directory that contains the Gateway and the Plugins folder. You'll first create the `go.work` file to set up your Go workspace, and include the `tyk-release-5.3.6` and `plugins` folders. Then, navigate to the plugins folder to fetch the Gateway dependency at the exact commit hash and run `go mod tidy` to ensure dependencies are up to date.
+
+    Follow these commands:
+
+    ```
+    go work init ./tyk-release-5.3.6
+    go work use ./plugins
+    commit_hash=$(cd tyk-release-5.3.6 && git rev-parse HEAD)
+    cd plugins && go get github.com/TykTechnologies/tyk@${commit_hash} && go mod tidy && cd -
+    ```
+
+    The following snippet shows how to get the exact commit hash, so that it can be used with `go get`.
+
+    - `git rev-parse HEAD`
+
+    The Go workspace file (`go.work`) should look like this:
+
+    ```
+    go 1.22.7
+
+    use (
+        ./plugins
+        ./tyk-release-5.3.6
+    )
+    ```
+
+4. **Building and validating the plugin**
+
+    Now that your *Go workspace* is ready, you can build your plugin as follows:
+
+    ```
+    cd tyk-release-5.3.6 && go build -tags=goplugin -trimpath . && cd -
+    cd plugins && go build -trimpath -buildmode=plugin . && cd -
+    ```
+
+    These steps build both the Gateway and the plugin.
+
+    You can use the Gateway binary that you just built to test that your new plugin loads into the Gateway without having to configure and then make a request to an API using this command:
+
+    ```
+    ./tyk-release-5.3.6/tyk plugin load -f plugins/testplugin.so -s AuthCheck
+    ```
+
+    You should see an output similar to:
+
+    ```
+    time="Oct 14 13:39:55" level=info msg="--- Go custom plugin init success! ---- "
+    [file=plugins/testplugin.so, symbol=AuthCheck] loaded ok, got 0x76e1aeb52140
+    ```
+
+    The log shows that the plugin has correctly loaded into the Gateway and that its `init` function has been successfully invoked.
+
+5. **Summary**
+
+    In the preceding steps we have put together an end-to-end build environment for both the Gateway and the plugin. Bear in mind that runtime environments may have additional restrictions beyond Go version and build flags to which the plugin developer must pay attention.
+ + Compatibility in general is a big concern when working with Go plugins: as the plugins are tightly coupled to the Gateway, consideration must always be made for the build restrictions enforced by environment and configuration options. + + Continue with [Loading Go Plugins into Tyk](/api-management/plugins/golang#loading-custom-go-plugins-into-tyk). + +### Debugging Golang Plugins + +Plugins are native Go code compiled to a binary shared object file. The code may depend on `cgo` and require libraries like `libc` provided by the runtime environment. The following are some debugging steps for diagnosing issues arising from using plugins. + +#### Warnings + +The [Plugin package - Warnings](https://pkg.go.dev/plugin#hdr-Warnings) section in the Go documentation outlines several requirements which can't be ignored when working with plugins. The most important restriction is the following: + +> Runtime crashes are likely to occur unless all parts of the program (the application and all its plugins) are compiled using exactly the same version of the toolchain, the same build tags, and the same values of certain flags and environment variables. + +#### Using Incorrect Build Flags + +When working with Go plugins, it's easy to miss the restriction that the plugin at the very least must be built with the same Go version, and the same flags (notably `-trimpath`) as the Tyk Gateway on which it is to be used. + +If you miss an argument (for example `-trimpath`) when building the plugin, the Gateway will report an error when your API attempts to load the plugin, for example: + +``` +task: [test] cd tyk-release-5.3.6 && go build -tags=goplugin -trimpath . +task: [test] cd plugins && go build -buildmode=plugin . 
+task: [test] ./tyk-release-5.3.6/tyk plugin load -f plugins/testplugin.so -s AuthCheck +tyk: error: unexpected error: plugin.Open("plugins/testplugin"): plugin was built with a different version of package internal/goarch, try --help +``` + +Usually when the error hints at a standard library package, the build flags between the Gateway and plugin binaries don't match. + +Other error messages may be reported, depending on what triggered the issue. For example, if you omitted `-race` in the plugin but the gateway was built with `-race`, the following error will be reported: + +``` +plugin was built with a different version of package runtime/internal/sys, try --help +``` + +Strictly speaking: + +- Build flags like `-trimpath`, `-race` need to match. +- Go toolchain / build env needs to be exactly the same. +- For cross compilation you must use the same `CC` value for the build (CGO). +- `CGO_ENABLED=1`, `GOOS`, `GOARCH` must match with runtime. + +When something is off, you can check what is different by using the `go version -m` command for the Gateway (`go version -m tyk`) and plugin (`go version -m plugin.so`). Inspecting and comparing the output of `build` tokens usually yields the difference that caused the compatibility issue. + +#### Plugin Compatibility Issues + +Below are some common situations where dependencies might cause issues: + +- The `Gateway` has a dependency without a `go.mod` file, but the plugin needs to use it. +- Both the `Gateway` and the plugin share a dependency. In this case, the plugin must use the exact same version as the `Gateway`. +- The plugin requires a different version of a shared dependency. + +Here’s how to handle each case: + +**Case 1: Gateway dependency lacks `go.mod`** + +- The plugin depends on the `Gateway`, which uses dependency *A*. +- *A* doesn’t have a `go.mod` file, so a pseudo version is generated during the build. +- Result: The build completes, but the plugin fails to load due to a version mismatch. 
+ +**Solution:** Update the code to remove dependency *A*, or use a version of *A* that includes a `go.mod` file. + +**Case 2: Shared dependency with version matching** + +- The plugin and `Gateway` share a dependency, and this dependency includes a `go.mod` file. +- The version matches, and the dependency is promoted to *direct* in `go.mod`. +- Outcome: You’ll need to keep this dependency version in sync with the `Gateway`. + +**Case 3: Plugin requires a different version of a shared dependency** + +- The plugin and `Gateway` share a dependency, but the plugin needs a different version. +- If the other version is a major release (e.g., `/v4`), it’s treated as a separate package, allowing both versions to coexist. +- If it’s just a minor/patch difference, the plugin will likely fail to load due to a version conflict. + +**Recommendation:** For best results, use Go package versions that follow the Go module versioning (metaversion). However, keep in mind that many `Gateway` dependencies use basic `v1` semantic versioning, which doesn’t always enforce strict versioned import paths. + +#### List plugin symbols + +Sometimes it's useful to list symbols from a plugin. 
For example, we can list the symbols as they are compiled into our testplugin: + +``` +# nm -gD testplugin.so | grep testplugin +00000000014db4b0 R go:link.pkghashbytes.testplugin +000000000170f7d0 D go:link.pkghash.testplugin +000000000130f5e0 T testplugin.AddFooBarHeader +000000000130f900 T testplugin.AddFooBarHeader.deferwrap1 +000000000130f980 T testplugin.AuthCheck +0000000001310100 T testplugin.AuthCheck.deferwrap1 +000000000130f540 T testplugin.init +0000000001310ce0 T testplugin.init.0 +0000000001ce9580 D testplugin..inittask +0000000001310480 T testplugin.InjectConfigData +0000000001310180 T testplugin.InjectMetadata +0000000001d2a3e0 B testplugin.logger +0000000001310cc0 T testplugin.main +0000000001310820 T testplugin.MakeOutboundCall +0000000001310c40 T testplugin.MakeOutboundCall.deferwrap1 +``` + +This command prints other symbols that are part of the binary. In the worst case, a build compatibility issue may cause a crash in the Gateway due to an unrecoverable error and this can be used to further debug the binaries produced. + +A very basic check to ensure Gateway/plugin compatibility is using the built in `go version -m `: + +``` +[output truncated] + build -buildmode=exe + build -compiler=gc + build -race=true + build -tags=goplugin + build -trimpath=true + build CGO_ENABLED=1 + build GOARCH=amd64 + build GOOS=linux + build GOAMD64=v1 + build vcs=git + build vcs.revision=1db1935d899296c91a55ba528e7b653aec02883b + build vcs.time=2024-09-24T12:54:26Z + build vcs.modified=false +``` + +These options should match between the Gateway binary and the plugin. You can use the command for both binaries and then compare the outputs. + + +## Writing Custom Go Plugins + +Tyk's custom Go plugin middleware is very powerful as it provides you with access to different data types and functionality as explained in this section. 
+
+Golang plugins are a very flexible and powerful way to extend the functionality of Tyk and use the native Golang plugins API (see [go pkg/plugin docs](https://golang.org/pkg/plugin) for more details).
+
+Custom Go plugins can access various data objects relating to the API request:
+
+- [session](/api-management/plugins/golang#accessing-the-session-object): the key session object provided by the client when making the API request
+- [API definition](/api-management/plugins/golang#accessing-the-api-definition): the Tyk OAS or Tyk Classic API definition for the requested API
+
+Custom Go plugins can also [terminate the request](/api-management/plugins/golang#terminating-the-request) and stop further processing of the API request such that it is not sent to the upstream service.
+
+For more resources for writing plugins, please visit our [Plugin Hub](/api-management/plugins/overview#plugins-hub).
+To see an example of a Go plugin, please visit our [Go plugin examples](/api-management/plugins/golang#example-custom-go-plugins) page.
+
+### Accessing the internal state of a custom plugin
+
+A Golang plugin can be treated as a normal Golang package but:
+
+- the package name is always `"main"` and this package cannot be imported
+- this package is loaded at run-time by Tyk, after all other Golang packages
+- this package has to have an empty `func main() {}`.
+
+A Go plugin can have a declared `func init()` and it gets called only once (when Tyk loads this plugin for the first time for an API).
+
+It is possible to create structures or open connections to third-party services/storage and then share them within every call and export the function in your Golang plugin. 
+ +For example, here is an example of a Tyk Golang plugin with a simple hit counter: + +```go {linenos=true, linenostart=1} +package main + +import ( + "encoding/json" + "net/http" + "sync" + + "github.com/TykTechnologies/tyk/ctx" + "github.com/TykTechnologies/tyk/log" + "github.com/TykTechnologies/tyk/user" +) + +var logger = log.Get() + +// plugin exported functionality +func MyProcessRequest(rw http.ResponseWriter, r *http.Request) { + endPoint := r.Method + " " + r.URL.Path + logger.Info("Custom middleware, new hit:", endPoint) + + hitCounter := recordHit(endPoint) + logger.Debug("New hit counter value:", hitCounter) + + if hitCounter > 100 { + logger.Warning("Hit counter to high") + } + + reply := myReply{ + Session: ctx.GetSession(r), + Endpoint: endPoint, + HitCounter: hitCounter, + } + + jsonData, err := json.Marshal(reply) + if err != nil { + logger.Error(err.Error()) + rw.WriteHeader(http.StatusInternalServerError) + return + } + + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + rw.Write(jsonData) +} + +// called once plugin is loaded, this is where we put all initialisation work for plugin +// i.e. setting exported functions, setting up connection pool to storage and etc. +func init() { + hitCounter = make(map[string]uint64) +} + +// plugin internal state and implementation +var ( + hitCounter map[string]uint64 + hitCounterMu sync.Mutex +) + +func recordHit(endpoint string) uint64 { + hitCounterMu.Lock() + defer hitCounterMu.Unlock() + hitCounter[endpoint]++ + return hitCounter[endpoint] +} + +type myReply struct { + Session *user.SessionState `json:"session"` + Endpoint string `json:"endpoint"` + HitCounter uint64 `json:"hit_counter"` +} + +func main() {} +``` + +Here we see how the internal state of the Golang plugin is used by the exported function `MyProcessRequest` (the one we set in the API spec in the `"custom_middleware"` section). 
The map `hitCounter` is used to send internal state and count hits to different endpoints. Then our exported Golang plugin function sends an HTTP reply with endpoint hit statistics. + +### Accessing the API definition + +When Tyk passes a request to your plugin, the API definition is made available as part of the request context. + + + +The API definition is accessed differently for Tyk OAS APIs and Tyk Classic APIs, as indicated in the following sections. If you use the wrong call for your API type, it will return `nil`. + + + +#### Working with Tyk OAS APIs + +The API definition can be accessed as follows: + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/TykTechnologies/tyk/ctx" +) + +func MyPluginFunction(w http.ResponseWriter, r *http.Request) { + oas := ctx.GetOASDefinition(r) + fmt.Println("OAS doc title is", oas.Info.Title) +} + +func main() {} +``` + +The invocation of `ctx.GetOASDefinition(r)` returns an `OAS` object containing the Tyk OAS API definition. +The Go data structure can be found [here](https://github.com/TykTechnologies/tyk/blob/master/apidef/oas/oas.go#L28). + +#### Working with Tyk Classic APIs + +The API definition can be accessed as follows: + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/TykTechnologies/tyk/ctx" +) + +func MyPluginFunction(w http.ResponseWriter, r *http.Request) { + apidef := ctx.GetDefinition(r) + fmt.Println("API name is", apidef.Name) +} + +func main() {} +``` + +The invocation of `ctx.GetDefinition(r)` returns an APIDefinition object containing the Tyk Classic API Definition. +The Go data structure can be found [here](https://github.com/TykTechnologies/tyk/blob/master/apidef/api_definitions.go#L583). + +### Accessing the session object + +When Tyk passes a request to your plugin, the key session object is made available as part of the request context. 
This can be accessed as follows: + +```go +package main +import ( + "fmt" + "net/http" + "github.com/TykTechnologies/tyk/ctx" +) +func main() {} +func MyPluginFunction(w http.ResponseWriter, r *http.Request) { + session := ctx.GetSession(r) + fmt.Println("Developer ID:", session.MetaData["tyk_developer_id"] + fmt.Println("Developer Email:", session.MetaData["tyk_developer_email"] +} +``` + + + +Tyk Gateway sets the session in the [Authentication layer](/api-management/traffic-transformation#request-middleware-chain) of the middleware chain. Because of this, the session object does not exist until the middleware chain runs after the authentication middleware. If you call `ctx.GetSession` inside a custom auth plugin, it will always return an empty object. + + + +The invocation of `ctx.GetSession(r)` returns an SessionState object. +The Go data structure can be found [here](https://github.com/TykTechnologies/tyk/blob/master/user/session.go#L106). + +Here is an [example](https://github.com/TykTechnologies/custom-plugin-examples/blob/master/plugins/go-auth-multiple_hook_example/main.go#L135) custom Go plugin that makes use of the session object. + +### Terminating the request + +You can terminate the request within your custom Go plugin and provide an HTTP response to the originating client, such that the plugin behaves similarly to a [virtual endpoint](/api-management/traffic-transformation/virtual-endpoints). + +- the HTTP request processing is stopped and other middleware in the chain won't be used +- the HTTP request round-trip to the upstream target won't happen +- analytics records will still be created and sent to the analytics processing flow + +This [example](/api-management/plugins/golang#using-a-custom-go-plugin-as-a-virtual-endpoint) demonstrates a custom Go plugin configured as a virtual endpoint. + +### Logging from a custom plugin + +Your plugin can write log entries to Tyk's logging system. 
+ +To do so you just need to import the package `"github.com/TykTechnologies/tyk/log"` and use the exported public method `Get()`: + +```go {linenos=true, linenostart=1} +package main + +import ( + "net/http" + + "github.com/TykTechnologies/tyk/log" +) + +var logger = log.Get() + +// AddFooBarHeader adds custom "Foo: Bar" header to the request +func AddFooBarHeader(rw http.ResponseWriter, r *http.Request) { + logger.Info("Processing HTTP request in Golang plugin!!") + r.Header.Add("Foo", "Bar") +} + +func main() {} +``` + +#### Monitoring instrumentation for custom plugins + +All custom middleware implemented as Golang plugins support Tyk's current built in instrumentation. + +The format for an event name with metadata is: `"GoPluginMiddleware:" + Path + ":" + SymbolName`, e.g., for our example, the event name will be: + +```text +"GoPluginMiddleware:/tmp/AddFooBarHeader.so:AddFooBarHeader" +``` + +The format for a metric with execution time (in nanoseconds) will have the same format but with the `.exec_time` suffix: + +```text +"GoPluginMiddleware:/tmp/AddFooBarHeader.so:AddFooBarHeader.exec_time" +``` + +### Creating a custom response plugin + +As explained [here](/api-management/plugins/plugin-types#response-plugins), you can register a custom Go plugin to be triggered in the response middleware chain. You must configure the `driver` field to `goplugin` in the API definition when registering the plugin. + +#### Response plugin method signature + +To write a response plugin in Go you need it to have a method signature as in the example below i.e. `func(http.ResponseWriter, *http.Response, *http.Request)`. +You can then access and modify any part of the request or response. User session and API definition data can be accessed as with other Go plugin hook types. 
+ +```go +package main + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + +) + +// MyPluginResponse intercepts response from upstream +func MyPluginResponse(rw http.ResponseWriter, res *http.Response, req *http.Request) { + // add a header to our response object + res.Header.Add("X-Response-Added", "resp-added") + + // overwrite our response body + var buf bytes.Buffer + buf.Write([]byte(`{"message":"Hi! I'm a response plugin"}`)) + res.Body = ioutil.NopCloser(&buf) + +} + +func main() {} +``` + +## Plugin compiler + +Tyk provides a Plugin Compiler tool that will create a file that can be [loaded into Tyk](/api-management/plugins/golang#loading-custom-go-plugins-into-tyk) to implement your desired custom logic. + + + +The plugin compiler is not supported on Ubuntu 16.04 (Xenial Xerus) as it uses glibc 2.23 which is incompatible with our standard build environment. If you absolutely must have Go plugin support on Xenial, please contact Tyk support. + + + + + +### Compiler options + +Most of the following arguments are applied only to developer flows. These aid development and testing purposes, and support of these varies across releases, due to changes in the Go ecosystem. + +The latest plugin compiler implements the following options: + +- `plugin_name`: output root file name (for example `plugin.so`) +- `build_id`: [optional] provides build uniqueness +- `GOOS`: [optional] override of GOOS (add `-e GOOS=linux`) +- `GOARCH`: [optional] override of GOARCH (add `-e GOARCH=amd64`) + +By default, if `build_id` is not provided, the gateway will not allow the plugin to be loaded twice. This is a restriction of the Go plugins standard library implementation. As long as the builds are made with a unique `build_id`, the same plugin can be loaded multiple times. + +When you provide a unique `build_id` argument, that also enables hot-reload compatibility of your `.so` plugin build, so that you would not need to restart the gateway, only reload it. 
+
+- before 5.1: the plugin would be built in a filesystem path based on `build_id`
+- since 5.2.4: the plugin compiler adjusts the Go module in use for the plugin.
+
+As the plugins are built with `-trimpath`, to omit local filesystem path details and improve plugin compatibility, the plugin compiler relies on the Go module itself to ensure each plugin build is unique. It modifies the plugin build `go.mod` file and imports to ensure a unique build.
+
+- [plugin package: Warnings](https://pkg.go.dev/plugin#hdr-Warnings)
+- [golang#29525 - plugin: cannot open the same plugin with different names](https://github.com/golang/go/issues/29525)
+
+### Output filename
+
+Since v4.1.0 the plugin compiler has automatically added the following suffixes to the root filename provided in the `plugin_name` argument:
+
+- `{Gw-version}`: the Tyk Gateway version, for example, `v5.3.0`
+- `{OS}`: the target operating system, for example `linux`
+- `{arch}`: the target CPU architecture, for example, `arm64`
+
+Thus, if `plugin_name` is set to `plugin.so` then given these example values the output file will be: `plugin_v5.3.0_linux_arm64.so`.
+
+This enables you to have one directory with multiple versions of the same plugin targeting different Gateway versions.
+
+#### Cross-compiling for different architectures and operating systems
+
+The Tyk Go Plugin Compiler can generate output for different architectures and operating systems from the one in which the compiler is run (cross-compiling). When you do this, the output filename will be suffixed with the target OS and architecture.
+
+You simply provide the target `GOOS` and `GOARCH` arguments to the plugin compiler, for example:
+
+```bash
+docker run --rm -v `pwd`:/plugin-source \
+ --platform=linux/amd64 \
+ tykio/tyk-plugin-compiler:v5.2.1 plugin.so $build_id linux arm64
+```
+
+This command will cross-compile your plugin for a `linux/arm64` architecture. It will produce an output file named `plugin_v5.2.1_linux_arm64.so`. 
+ + + +If you are using the plugin compiler on MacOS, the docker run argument `--platform=linux/amd64` is necessary. The plugin compiler is a cross-build environment implemented with `linux/amd64`. + + + +### Experimental options + +The plugin compiler also supports a set of environment variables being passed: + +- `DEBUG=1`: enables debug output from the plugin compiler process. +- `GO_TIDY=1`: runs go mod tidy to resolve possible dependency issues. +- `GO_GET=1`: invokes go get to retrieve the exact Tyk gateway dependency. + +These environment options are only available in the latest gateway and plugin compiler versions. +They are unsupported and are provided to aid development and testing workflows. + +## Loading Custom Go Plugins into Tyk + +For development purposes, we are going to load the plugin from local file storage. For production, you can use [bundles](#loading-a-tyk-golang-plugin-from-a-bundle) to deploy plugins to multiple gateways. + +In this example we are using a Tyk Classic API. In the API definition find the `custom_middleware` section and make it look similar to the snippet below. Tyk Dashboard users should use RAW API Editor to access this section. + +```json +"custom_middleware": { + "pre": [], + "post_key_auth": [], + "auth_check": {}, + "post": [ + { + "name": "AddFooBarHeader", + "path": "/plugin.so" + } + ], + "driver": "goplugin" +} +``` + +Here we have: + +- `driver` - Set this to `goplugin` (no value created for this plugin) which says to Tyk that this custom middleware is a Golang native plugin. +- `post` - This is the hook name. We use middleware with hook type `post` because we want this custom middleware to process the request right before it is passed to the upstream target (we will look at other types later). +- `post.name` - is your function name from the Go plugin project. +- `post.path` - is the full or relative (to the Tyk binary) path to the built plugin file (`.so`). Make sure Tyk has read access to this file. 
+
+Also, let's set fields `"use_keyless": true` and `"target_url": "http://httpbin.org/"` - for testing purposes. We will test what request arrives at our upstream target and `httpbin.org` is a perfect fit for that.
+
+The API needs to be reloaded after that change (this happens automatically when you save the updated API in the Dashboard).
+
+Now your API with its Golang plugin is ready to process traffic:
+
+```bash
+# curl http://localhost:8181/my_api_name/get
+{
+  "args": {},
+  "headers": {
+    "Accept": "*/*",
+    "Accept-Encoding": "gzip",
+    "Foo": "Bar",
+    "Host": "httpbin.org",
+    "User-Agent": "curl/7.54.0"
+  },
+  "url": "https://httpbin.org/get"
+}
+```
+
+We see that the upstream target has received the header `"Foo": "Bar"` which was added by our custom middleware implemented as a native Golang plugin in Tyk.
+
+### Updating the plugin
+
+Loading an updated version of your plugin requires one of the following actions:
+
+- An API reload with a NEW path or file name of your `.so` file with the plugin. You will need to update the API spec section `"custom_middleware"`, specifying a new value for the `"path"` field of the plugin you need to reload.
+- Tyk main process reload. This will force a reload of all Golang plugins for all APIs.
+
+If a plugin is loaded as a bundle and you need to update it you will need to update your API spec with a new `.zip` file name in the `"custom_middleware_bundle"` field. Make sure the new `.zip` file is uploaded and available via the bundle HTTP endpoint before you update your API spec.
+
+### Loading a Tyk Golang plugin from a bundle
+
+Currently we have loaded Golang plugins only directly from the file system. However, when you have multiple gateway instances, you need a more dynamic way to load plugins. Tyk offers bundle instrumentation [Plugin Bundles](/api-management/plugins/overview#plugin-bundles). 
Using the bundle command creates an archive with your plugin, which you can deploy to the HTTP server (or AWS S3) and then your plugins will be fetched and loaded from that HTTP endpoint. + +You will need to set in `tyk.conf` these two fields: + +- `"enable_bundle_downloader": true` - enables the plugin bundles downloader +- `"bundle_base_url": "http://mybundles:8000/abc"` - specifies the base URL with the HTTP server where you place your bundles with Golang plugins (this endpoint must be reachable by the gateway) + +Also, you will need to specify the following field in your API spec: + +`"custom_middleware_bundle"` - here you place your filename with the bundle (`.zip` archive) to be fetched from the HTTP endpoint you specified in your `tyk.conf` parameter `"bundle_base_url"` + +To load a plugin, your API spec should set this field like so: + +```json +"custom_middleware_bundle": "FooBarBundle.zip" +``` + +Let's look at `FooBarBundle.zip` contents. It is just a ZIP archive with two files archived inside: + +- `AddFooBarHeader.so` - this is our Golang plugin +- `manifest.json` - this is a special file with meta information used by Tyk's bundle loader + +The contents of `manifest.json`: + +```yaml +{ + "file_list": [ + "AddFooBarHeader.so" + ], + "custom_middleware": { + "post": [ + { + "name": "AddFooBarHeader", + "path": "AddFooBarHeader.so" + } + ], + "driver": "goplugin" + }, + + ... +} +``` + +Here we see: + +- field `"custom_middleware"` with exactly the same structure we used to specify `"custom_middleware"` in API spec without bundle +- field `"path"` in section `"post"` now contains just a file name without any path. This field specifies `.so` filename placed in a ZIP archive with the bundle (remember how we specified `"custom_middleware_bundle": "FooBarBundle.zip"`). 
+ +## Using custom Go plugins with Tyk Cloud + +The following supporting resources are provided for developing plugins on Tyk Cloud: + +- [Enabling Plugins On The Control Plane](/tyk-cloud/using-plugins) +- [Uploading Your Plugin Bundle To S3 Bucket](/tyk-cloud/using-plugins#uploading-your-bundle) + +## Example custom Go plugins + +This document provides a working example for providing specific functionality with a custom Go plugin. + +For more resources for writing plugins, please visit our [Plugin Hub](/api-management/plugins/overview#plugins-hub). + +### Using a custom Go plugin as a virtual endpoint + +It is possible to send a response from the Golang plugin custom middleware. In the case that the HTTP response was sent: + +- The HTTP request processing is stopped and other middleware in the chain won't be used. +- The HTTP request round-trip to the upstream target won't happen +- Analytics records will still be created and sent to the analytics processing flow. + +Let's look at an example of how to send an HTTP response from the Tyk Golang plugin. 
Imagine that we need middleware which would send JSON with the current time if the request contains the parameter `get_time=1` in the request query string: + +```go +package main + +import ( + "encoding/json" + "net/http" + "time" +) + +func SendCurrentTime(rw http.ResponseWriter, r *http.Request) { + // check if we don't need to send reply + if r.URL.Query().Get("get_time") != "1" { + // allow request to be processed and sent to upstream + return + } + + //Prepare data to send + replyData := map[string]interface{}{ + "current_time": time.Now(), + } + + jsonData, err := json.Marshal(replyData) + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + return + } + + //Send HTTP response from the Golang plugin + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + rw.Write(jsonData) +} + +func main() {} +``` + +Let's build the plugin by running this command in the plugin project folder: + +```bash +go build -trimpath -buildmode=plugin -o /tmp/SendCurrentTime.so +``` + +Then let's edit the API spec to use this custom middleware: + +```json +"custom_middleware": { + "pre": [ + { + "name": "SendCurrentTime", + "path": "/tmp/SendCurrentTime.so" + } + ], + "post_key_auth": [], + "auth_check": {}, + "post": [], + "driver": "goplugin" +} +``` + +Let's check that we still perform a round trip to the upstream target if the request query string parameter `get_time` is not set: + +```bash +# curl http://localhost:8181/my_api_name/get +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Host": "httpbin.org", + "User-Agent": "curl/7.54.0" + }, + "url": "https://httpbin.org/get" +} +``` + +Now let's check if our Golang plugin sends an HTTP 200 response (with JSON containing current time) when we set `get_time=1` query string parameter: + +```bash +# curl http://localhost:8181/my_api_name/get?get_time=1 +{"current_time":"2019-09-11T23:44:10.040878-04:00"} +``` + +Here we see that: + +- We've got an HTTP 200 
response code. +- The response body has a JSON payload with the current time. +- The upstream target was not reached. Our Tyk Golang plugin served this request and stopped processing after the response was sent. + +### Performing custom authentication with a Golang plugin + +You can implement your own authentication method, using a Golang plugin and custom `"auth_check"` middleware. Ensure you set the two fields in Post Authentication Hook. + +Let's have a look at the code example. Imagine we need to implement a very trivial authentication method when only one key is supported (in the real world you would want to store your keys in some storage or have some more complex logic). + +```go +package main + +import ( + "net/http" + + "github.com/TykTechnologies/tyk/ctx" + "github.com/TykTechnologies/tyk/headers" + "github.com/TykTechnologies/tyk/user" +) + +func getSessionByKey(key string) *user.SessionState { + //Here goes our logic to check if the provided API key is valid and appropriate key session can be retrieved + + // perform auth (only one token "abc" is allowed) + if key != "abc" { + return nil + } + + // return session + return &user.SessionState{ + OrgID: "default", + Alias: "abc-session", + } +} + +func MyPluginAuthCheck(rw http.ResponseWriter, r *http.Request) { + //Try to get a session by API key + key := r.Header.Get(headers.Authorization) + session := getSessionByKey(key) + if session == nil { + // auth failed, reply with 403 + rw.WriteHeader(http.StatusForbidden) + return + } + + // auth was successful, add the session to the request's context so other middleware can use it + ctx.SetSession(r, session, true) + + // if compiling on a version older than 4.0.1, use this instead + // ctx.SetSession(r, session, key, true) +} + +func main() {} +``` + +A couple of notes about this code: + +- the package `"github.com/TykTechnologies/tyk/ctx"` is used to set a session in the request context - this is something `"auth_check"`-type custom middleware is 
responsible for. +- the package `"github.com/TykTechnologies/tyk/user"` is used to operate with Tyk's key session structure. +- our Golang plugin sends a 403 HTTP response if authentication fails. +- our Golang plugin just adds a session to the request context and returns if authentication was successful. + +Let's build the plugin by running the following command in the folder containing your plugin project: + +```bash +go build -trimpath -buildmode=plugin -o /tmp/MyPluginAuthCheck.so +``` + +Now let's check if our custom authentication works as expected (only one key `"abc"` should work). + +Authentication will fail with the wrong API key: + +```bash +# curl -v -H "Authorization: xyz" http://localhost:8181/my_api_name/get +* Trying ::1... +* TCP_NODELAY set +* Connected to localhost (::1) port 8181 (#0) +> GET /my_api_name/get HTTP/1.1 +> Host: localhost:8181 +> User-Agent: curl/7.54.0 +> Accept: */* +> Authorization: xyz +> +< HTTP/1.1 403 Forbidden +< Date: Wed, 11 Sep 2019 04:31:34 GMT +< Content-Length: 0 +< +* Connection #0 to host localhost left intact +``` + +Here we see that our custom middleware replied with a 403 response and request processing was stopped at this point. + +Authentication successful with the right API key: + +```bash +# curl -v -H "Authorization: abc" http://localhost:8181/my_api_name/get +* Trying ::1... 
+* TCP_NODELAY set +* Connected to localhost (::1) port 8181 (#0) +> GET /my_api_name/get HTTP/1.1 +> Host: localhost:8181 +> User-Agent: curl/7.54.0 +> Accept: */* +> Authorization: abc +> +< HTTP/1.1 200 OK +< Content-Type: application/json +< Date: Wed, 11 Sep 2019 04:31:39 GMT +< Content-Length: 257 +< +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Authorization": "abc", + "Host": "httpbin.org", + "User-Agent": "curl/7.54.0" + }, + "url": "https://httpbin.org/get" +} +* Connection #0 to host localhost left intact +``` + +Here we see that our custom middleware successfully authenticated the request and we received a reply from the upstream target. + +## Upgrading your Tyk Gateway + +When upgrading your Tyk Gateway deployment, you need to re-compile your plugin with the new version. At the moment of loading a plugin, the Gateway will try to find a plugin with the name provided in the API definition. If none is found then it will fall back to search the plugin file with the name: `{plugin-name}_{Gw-version}_{OS}_{arch}.so`. + +Since Tyk v4.1.0, the compiler [automatically](/api-management/plugins/golang#output-filename) creates plugin files following this convention so when you upgrade, say from Tyk v5.2.5 to v5.3.0 you only need to have the plugins compiled for v5.3.0 before performing the upgrade. + +This diagram shows how every Tyk Gateway will search and load the plugin binary that it is compatible with. +APIs Menu diff --git a/api-management/plugins/javascript.mdx b/api-management/plugins/javascript.mdx new file mode 100644 index 000000000..75197b245 --- /dev/null +++ b/api-management/plugins/javascript.mdx @@ -0,0 +1,720 @@ +--- +title: "Javascript Plugins" +description: "How to manage users, teams, permissions, rbac in Tyk Dashboard" +sidebarTitle: "Javscript Plugins" +--- + +## Introduction + +There are three middleware components that can be scripted with Tyk: + +1. 
**Custom JavaScript plugins**: These execute either *pre* or *post* validation. A *pre* middleware component will execute before any session validation or token validation has taken place, while a *post* middleware component will execute after the request has been passed through all checks and is ready to be proxied upstream. + +2. **Dynamic event handlers**: These components fire on certain API events (see the event handlers section), these are fired Async and do not have a cooldown timer. These are documented [here](/api-management/gateway-events#set-up-a-webhook-event-handler-in-the-tyk-oas-api-definition). + +3. **Virtual endpoints**: These are powerful programmable middleware invoked towards the end of the request processing chain. Unlike the custom JavaScript plugins, the virtual endpoint terminates the request. These are documented [here](/api-management/traffic-transformation/virtual-endpoints). + +The JavaScript (JS) [scripting guide](/api-management/plugins/javascript#using-javascript-with-tyk) provides details of how to access dynamic data (such as the key session object) from your JS functions. Combining these resources provides you with a powerful set of tools for shaping and structuring inbound traffic to your API. + +### Declared plugin functions + +JavaScript functions are available globally in the same namespace. So, if you include two or more JSVM plugins that call the same function, the last declared plugin implementation of the function will be returned. + +### Enabling the JavaScript Virtual Machine (JSVM) + +The JavaScript Virtual Machine (JSVM) provided in the Gateway is a traditional ECMAScript5 compatible environment. + +Before you can use JavaScript customization in any component you will need to enable the JSVM. + +You do this by setting `enable_jsvm` to `true` in your `tyk.conf` [file](/tyk-oss-gateway/configuration#enable_jsvm). 
+ +### Installing JavaScript middleware + +Installing middleware is different for different Tyk deployments, for example, in Tyk OSS it is possible to directly specify a path to a file in the API Definition, while in Tyk Self-Managed, we recommend using a directory-based loader. + +We've provided the following guides: + +- [Tyk OSS](/api-management/plugins/javascript#installing-middleware-on-tyk-oss) +- [Tyk Self-Managed](/api-management/plugins/javascript#installing-middleware-on-tyk-self-managed) +- [Tyk Hybrid](/api-management/plugins/javascript#installing-middleware-on-tyk-hybrid) + + + + + Tyk Cloud Classic does not support custom middleware. + + +## Using JavaScript with Tyk + +Tyk's JavaScript Virtual Machine (JSVM) provides a serverless compute function that allows for the execution of custom logic directly within the gateway itself. This can be accessed from [multiple locations](/api-management/plugins/javascript#) in the API processing chain and allows significant customization and optimization of your request handling. + +In this guide we will cover the features and resources available to you when creating custom functions, highlighting where there are limitations for the different middleware stages. + +### Scripting basics + +Here we cover various facets that you need to be aware of when creating custom functions for Tyk. + +#### Accessing external and dynamic data + +JS functions can be given access to external data objects relating to the API request. 
These allow for the modification of both the request itself and the session: + +- `request`: an [object](/api-management/plugins/javascript#the-request-object) describing the API request that invoked the middleware +- `session`: the key session [object](/api-management/plugins/javascript#the-session-object) provided by the client when making the API request +- `config`: an [object](/api-management/plugins/javascript#the-config-object) containing fields from the API definition + + + + + There are other ways of accessing and editing a session object using the [Tyk JavaScript API functions](/api-management/plugins/javascript#working-with-the-key-session-object). + + + +#### Creating a middleware component + +Tyk injects a `TykJS` namespace into the JSVM, which can be used to initialise a new middleware component. The JS for each middleware component should be in its own `*.js` file. + +You create a middleware object by calling the `TykJS.TykMiddleware.NewMiddleware({})` constructor with an empty object and then initialising it with your function using the `NewProcessRequest()` closure syntax. This is where you expose the [external data objects](/api-management/plugins/javascript#accessing-external-and-dynamic-data) to your custom function. + + + +- For Custom JS plugins and Dynamic Event Handlers, the source code filename must match the function name +- Virtual Endpoints do not have this limitation + + + +#### Returning from the middleware + +When returning from the middleware, you provide specific return data depending upon the type of middleware. + +##### Returning from Custom JS plugin + +A custom JS plugin can modify fields in the API request and the session metadata, however this is not performed directly within the JSVM so the required updates must be passed out of the JSVM for Tyk to apply the changes. This is a requirement and omitting them can cause the middleware to fail. 
+ +The JS function must provide the `request` and `session.meta_data` objects in the `ReturnData` as follows: + +```js +return sampleMiddleware.ReturnData(request, session.meta_data); +``` + +Custom JS plugins sit in the [middleware processing chain](/api-management/traffic-transformation#request-middleware-chain) and pass the request onto the next middleware before it is proxied to the upstream. If required, however, a custom JS plugin can terminate the request and provide a custom response to the client if you configure the `ReturnOverrides` in the `request` object, as described [here](/api-management/plugins/javascript#using-returnoverrides). + +##### Returning from Virtual Endpoint + +Unlike custom JS plugins, Virtual Endpoints always [terminate the request](/api-management/traffic-transformation/virtual-endpoints#working) so have a different method of returning from the JS function. + +The function must return a `responseObject`. This is crucial as it determines the HTTP response that will be sent back to the client. The structure of this object is defined to ensure that the virtual endpoint can communicate the necessary response details back to the Tyk Gateway, which then forwards it to the client. + +The `responseObject` has the following structure: + +- `code`: an integer representing the HTTP status code of the response +- `headers`: an object containing key-value pairs representing the HTTP headers of the response +- `body`: a string that represents the body of the response which can be plain text, JSON, or XML, depending on what your API client expects to receive + +You must provide the `responseObject` together with the `session.meta_data` as parameters in a call to `TykJsResponse` as follows: + +```js +return TykJsResponse(responseObject, session.meta_data); +``` + +You can find some examples of how this works [here](/api-management/traffic-transformation/virtual-endpoints#examples). 
+ +### JavaScript resources + +JavaScript (JS) functions have access to a [system API](/api-management/plugins/javascript#javascript-api) and [library of functions](/api-management/plugins/javascript#underscorejs-library). They can also be given access to certain Tyk data objects relating to the API request. + +The system API provides access to resources outside of the JavaScript Virtual Machine sandbox, the ability to make outbound HTTP requests and access to the key management REST API functions. + +#### The `request` object + +The `request` object provides a set of arrays that describe the API request. These can be manipulated and, when changed, will affect the request as it passes through the middleware pipeline. For [virtual endpoints](/api-management/traffic-transformation/virtual-endpoints) the request object has a [different structure](#VirtualEndpoint-Request). + +The structure of the `request` object is: + +```typesecript +class ReturnOverrides { + ResponseCode: number = 200; + ResponseBody: string = ""; + ResponseHeaders: string[] = []; +} + +class Request { + Headers: { [key: string]: string[] } = {}; + SetHeaders: { [key: string]: string } = {}; + DeleteHeaders: string[] = []; + Body: string = ""; + URL: string = ""; + AddParams: { [key: string]: string } = {}; + DeleteParams: string[] = []; + ReturnOverrides: ReturnOverrides = new ReturnOverrides(); + IgnoreBody: boolean = false; + Method: string = ""; + RequestURI: string = ""; + Scheme: string = ""; +} +``` + +{/* ```go +struct { + Headers map[string][]string + SetHeaders map[string]string + DeleteHeaders []string + Body string + URL string + AddParams map[string]string + DeleteParams []string + ReturnOverrides { + ResponseCode: int + ResponseBody: string + ResponseHeaders []string + } + IgnoreBody bool + Method string + RequestURI string + Scheme string +} +``` */} + +- `Headers`: this is an object of string arrays, and represents the current state of the request header; this object cannot be 
modified directly, but can be used to read header data +- `SetHeaders`: this is a key-value map that will be set in the header when the middleware returns the object; existing headers will be overwritten and new headers will be added +- `DeleteHeaders`: any header name that is in this list will be deleted from the outgoing request; note that `DeleteHeaders` happens before `SetHeaders` +- `Body`: this represents the body of the request, if you modify this field it will overwrite the request +- `URL`: this represents the path portion of the outbound URL, you can modify this to redirect a URL to a different upstream path +- `AddParams`: you can add parameters to your request here, for example internal data headers that are only relevant to your network setup +- `DeleteParams`: these parameters will be removed from the request as they pass through the middleware; note `DeleteParams` happens before `AddParams` +- `ReturnOverrides`: values stored here are used to stop or halt middleware execution and return an error code +- `IgnoreBody`: if this parameter is set to `true`, the original request body will be used; if set to `false` the `Body` field will be used (`false` is the default behavior) +- `Method`: contains the HTTP method (`GET`, `POST`, etc.) +- `RequestURI`: contains the request URI, including the query string, e.g. `/path?key=value` +- `Scheme`: contains the URL scheme, e.g. `http`, `https` + + +##### Using `ReturnOverrides` + +If you configure values in `request.ReturnOverrides` then Tyk will terminate the request and provide a response to the client when the function completes. The request will not be proxied to the upstream. 
+ +The response will use the parameters configured in `ReturnOverrides`: + +- `ResponseCode` +- `ResponseBody` +- `ResponseHeaders` + +In this example, if the condition is met, Tyk will return `HTTP 403 Access Denied` with the custom header `"X-Error":"the-condition"`: + +```js +var testJSVMData = new TykJS.TykMiddleware.NewMiddleware({}); + +testJSVMData.NewProcessRequest(function(request, session, config) { + // Logic to determine if the request should be overridden + if (someCondition) { + request.ReturnOverrides.ResponseCode = 403; + request.ReturnOverrides.ResponseBody = "Access Denied"; + request.ReturnOverrides.headers = {"X-Error": "the-condition"}; + // This stops the request from proceeding to the upstream + } + return testJSVMData.ReturnData(request, session.meta_data); +}); +``` + +##### The virtual endpoint `request` object + + +For [virtual endpoint](/api-management/traffic-transformation/virtual-endpoints) functions the structure of a Javascript `request` object is: + +```typescript +class VirtualEndpointRequest { + Body: string = ""; + Headers: { [key: string]: string[] } = {}; + Params: { [key: string]: string[] } = {}; + Scheme: string = ""; + URL: string = ""; +} +``` + +- `Body`: HTTP request body, e.g. `""` +- `Headers`: HTTP request headers, e.g. `"Accept": ["*/*"]` +- `Params`: Decoded query and form parameters, e.g. `{ "confirm": ["true"], "userId": ["123"] }` +- `Scheme`: The scheme of the URL ( e.g. `http` or `https`) +- `URL`: The full URL of the request, e.g `/vendpoint/anything?user_id=123\u0026confirm=true` + +
+ + + +Each query and form parameter within the request is stored as an array field in the `Params` field of the request object. + +Repeated parameter assignments are appended to the corresponding array. For example, a request against `/vendpoint/anything?user_id[]=123&user_id[]=234` would result in a Javascript request object similar to that shown below: + +```javascript +const httpRequest = { + Headers: { + "Accept": ["*/*"], + "User-Agent": ["curl/8.1.2"] + }, + Body: "", + URL: "/vendpoint/anything?user_id[]=123\u0026user_id[]=234", + Params: { + "user_id[]": ["123", "234"] + }, + Scheme: "http" +}; +``` + + + +#### The `session` object + +Tyk uses an internal [session object](/api-management/policies#what-is-a-session-object) to handle the quota, rate limits, access allowances and auth data of a specific key. JS middleware can be granted access to the session object but there is also the option to disable it as deserialising it into the JSVM is computationally expensive and can add latency. Other than the `meta_data` field, the session object itself cannot be directly edited as it is crucial to the correct functioning of Tyk. + +##### Limitations + +- Custom JS plugins at the [pre-](/api-management/plugins/plugin-types#request-plugins) stage do not have access to the session object (as it has not been created yet) +- When scripting for Virtual Endpoints, the `session` data will only be available to the JS function if enabled in the middleware configuration. + +##### Sharing data between middleware using the `session` object + +For different middleware to be able to transfer data between each other, the session object makes available a `meta_data` key/value field that is written back to the session store (and can be retrieved by the middleware down the line) - this data is permanent, and can also be retrieved by the REST API from outside of Tyk using the `/tyk/keys/` method. + + + +A new JSVM instance is created for *each* API that is managed. 
Consequently, inter-API communication is not possible via shared methods, since they have different bounds. However, it *is* possible using the session object if a key is shared across APIs. + + + +#### The `config` object + +The third Tyk data object that is made available to the script running in the JSVM contains data from the API Definition. This is read-only and cannot be modified by the JS function. The structure of this object is: + +- `APIID`: the unique identifier for the API +- `OrgID`: the organization identifier +- `config_data`: custom attributes defined in the API description + +##### Adding custom attributes to the API Definition + +When working with Tyk OAS APIs, you can add custom attributes in the `data` object in the `x-tyk-api-gateway.middleware.global.pluginConfig` section of the API definition, for example: + +```json {linenos=true, linenostart=1} +{ + "x-tyk-api-gateway": { + "middleware": { + "global": { + "pluginConfig": { + "data": { + "enabled": true, + "value": { + "foo": "bar" + } + } + } + } + } + } +} +``` + +When working with Tyk Classic APIs, you simply add the attributes in the `config_data` object in the root of the API definition: + +```json {linenos=true, linenostart=1} +{ + "config_data": { + "foo": "bar" + } +} +``` + +#### Underscore.js Library + +In addition to our Tyk JavaScript API functions, you also have access to all the functions from the [underscore](http://underscorejs.org) library. + +Underscore.js is a JavaScript library that provides a lot of useful functional programming helpers without extending any built-in objects. Underscore provides over 100 functions that support your favorite functional helpers: + +- map +- filter +- invoke + +There are also more specialized goodies, including: + +- function binding +- JavaScript templating +- creating quick indexes +- deep equality testing + +### Example + +In this basic example, we show the creation and initialisation of a middleware object. 
Note how the three Tyk data objects (`request`, `session`, `config`) are made available to the function and the two objects that are returned from the function (in case the external objects need to be updated). + +```js {linenos=true, linenostart=1} +/* --- sampleMiddleware.js --- */ + +// Create new middleware object +var sampleMiddleware = new TykJS.TykMiddleware.NewMiddleware({}); + +// Initialise the object with your functionality by passing a closure that accepts +// two objects into the NewProcessRequest() function: +sampleMiddleware.NewProcessRequest(function(request, session, config) { + log("This middleware does nothing, but will print this to your terminal.") + + // You MUST return both the request and session metadata + return sampleMiddleware.ReturnData(request, session.meta_data); +}); +``` + +## JavaScript API + +This system API provides access to resources outside of the JavaScript Virtual Machine sandbox, the ability to make outbound HTTP requests and access to the key management REST API functions. + +Embedded JavaScript interpreters offer the bare bones of a scripting language, but not necessarily the functions that you would expect, especially with JavaScript, where objects such as `XMLHttpRequest()` are a given. However, those interfaces are actually provided by the browser / DOM that the script engine are executing in. In a similar vein, we have included a series of functions to the JSVM for convenience and give the interpreter more capability. + +This list is regularly revised and any new suggestions should be made in our [Github issue tracker](https://github.com/TykTechnologies/tyk/issues). + +Below is the list of functions currently provided by Tyk. + +- `log(string)`: Calling `log("this message")` will cause Tyk to log the string to Tyk's default logger output in the form `JSVM Log: this message` as an INFO statement. This function is especially useful for debugging your scripts. 
It is recommended to put a `log()` call at the end of your middleware and event handler module definitions to indicate on load that they have been loaded successfully - see the [example scripts](https://github.com/TykTechnologies/tyk/tree/master/middleware) in your Tyk installation `middleware` directory for more details. +- `rawlog(string)`: Calling `rawlog("this message")` will cause Tyk to log the string to Tyk's default logger output without any additional formatting, like adding prefix or date. This function can be used if you want to have own log format, and parse it later with custom tooling. +- `b64enc` - Encode string to base64 +- `b64dec` - Decode base64 string +- `TykBatchRequest` this function is similar to `TykMakeHttpRequest` but makes use of Tyk's [batch request feature](/api-management/batch-processing). +- `TykMakeHttpRequest(JSON.stringify(requestObject))`: This method is used to make an HTTP request, requests are encoded as JSON for deserialisation in the min binary and translation to a system HTTP call. The request object has the following structure: + +```js +newRequest = { + "Method": "POST", + "Body": JSON.stringify(event), + "Headers": {}, + "Domain": "http://foo.com", + "Resource": "/event/quotas", + "FormData": {"field": "value"} +}; +``` + + + +If you want to include querystring values, add them as part of the `Domain` property. + + + +Tyk passes a simplified response back which looks like this: + +```go +type TykJSHttpResponse struct { + Code int + Body string + Headers map[string][]string +} +``` + +The response is JSON string encoded, and so will need to be decoded again before it is usable: + +```js +usableResponse = JSON.parse(response); +log("Response code: " + usableResponse.Code); +log("Response body: " + usableResponse.Body); +``` + +This method does not execute asynchronously, so execution will block until a response is received. 
+ +### Working with the key session object + +To work with the key session object, two functions are provided: `TykGetKeyData` and `TykSetKeyData`: + +- `TykGetKeyData(api_key, api_id)`: Use this method to retrieve a [session object](/api-management/policies#what-is-a-session-object) for the key and the API provided: + + ```js + // In an event handler, we can get the key idea from the event, and the API ID from the context variable. + var thisSession = JSON.parse(TykGetKeyData(event.EventMetaData.Key, context.APIID)) + log("Expires: " + thisSession.expires) + ``` + +- `TykSetKeyData(api_key, api_id)`: Use this method to write data back into the Tyk session store: + + ```js + // You can modify the object just like with the REST API + thisSession.expires = thisSession.expires + 1000; + + // Use TykSetKeyData to set the key data back in the session store + TykSetKeyData(event.EventMetaData.Key, JSON.stringify(thisSession)); + ``` + +All of these methods are described in functional examples in the Tyk `middleware/` and `event_handlers/` folders. + +## Installing Middleware on Tyk Self-Managed + +In some cases middleware references can't be directly embedded in API Definitions (for example, when using the Tyk Dashboard in an Self-Managed installation). However, there is an easy way to distribute and enable custom middleware for an API in a Tyk node by adding them as a directory structure. + +Tyk will load the middleware plugins dynamically on host-reload without needing a direct reference to them in the API Definition. + +The directory structure should look like this: + +```text +middleware + / {API Id} + / pre + / {middlewareObject1Name}.js + / {middlewareObject2Name}.js + / post + / {middlewareObject1Name}_with_session.js + / {middlewareObject2Name}.js +``` + +Tyk will check for a folder that matches the `API Id` being loaded, and then load the `pre` and `post` middleware from the respective directories. + + + +The filename MUST match the object to be loaded exactly. 
+ + + +If your middleware requires session injection, then append `_with_session` to the filename. + +### Enable the JSVM + +Before you can use Javascript Middleware you will need to enable the JSVM. + +You can do this by setting `enable_jsvm` to `true` in your `tyk.conf` file. + +## Installing Middleware on Tyk Hybrid + +In some cases middleware references can't be directly embedded in API Definitions (for example, when using the dashboard in a Hybrid install). However, there is an easy way to distribute and enable custom middleware for an API on a Tyk node. + +A second method of loading API Definitions in Tyk nodes is to add them as a directory structure in the Tyk node. Tyk will load the middleware plugins dynamically on host-reload without needing a direct reference to them in the API Definition. + +The directory structure looks as follows: + +```text +middleware + / {API Id} + / pre + / {middlewareObject1Name}.js + / {middlewareObject2Name}.js + / post + / {middlewareObject1Name}_with_session.js + / {middlewareObject2Name}.js +``` + +Tyk will check for a folder that matches the `{API Id}` being loaded, and then load the `pre` and `post` middleware from the respective folders. + + + +The filename MUST match the object to be loaded exactly. + + + +If your middleware requires session injection, then append `_with_session` to the filename. + +### Enable the JSVM + +Before you can use Javascript Middleware you will need to enable the JSVM + +You can do this by setting `enable_jsvm` to `true` in your `tyk.conf` file. + +## Installing Middleware on Tyk OSS + +In order to activate middleware when using Tyk OSS or when using a file-based setup, the middleware needs to be registered as part of your API Definition. Registration of middleware components is relatively simple. + + + +It is important that your object names are unique. + + + + + +This functionality may change in subsequent releases. 
+ + + +### Enable the JSVM + +Before you can use Javascript Middleware you will need to enable the JSVM + +You can do this by setting `enable_jsvm` to `true` in your `tyk.conf` file. + +Adding the middleware plugin is as simple as adding it to your definition file in the middleware sections: + +```json +... +"event_handlers": {}, +"custom_middleware": { + "driver": "otto", + "pre": [ + { + "name": "sampleMiddleware", + "path": "middleware/sample.js", + "require_session": false + } + ], + "post": [ + { + "name": "sampleMiddleware", + "path": "middleware/sample.js", + "require_session": false + } + ] +}, +"enable_batch_request_support": false, +... +``` + +As you can see, the parameters are all dynamic, so you will need to ensure that the path to your middleware is correct. The configuration sections are as follows: + +- `pre`: Defines a list of custom middleware objects to run *in order* from top to bottom. That will be executed *before* any authentication information is extracted from the header or parameter list of the request. Use middleware in this section to pre-process a request before feeding it through the Tyk middleware. + +- `pre[].name`: The name of the middleware object to call. This is case sensitive, and **must** match the name of the middleware object that was created, so in our example - we created `sampleMiddleware` by calling: + + `var sampleMiddleware = new TykJS.TykMiddleware.NewMiddleware({});` + +- `pre[].path`: The path to the middleware component, this will be loaded into the JSVM when the API is initialised. This means that if you reload an API configuration, the middleware will also be re-loaded. You can hot-swap middleware on reload with no service interruption. + +- `pre[].require_session`: Irrelevant for pre-processor middleware, since no auth data has been extracted by the authentication middleware, it cannot be made available to the middleware. 
+ +- `post`: Defines a list of custom middleware objects to run *in order* from top to bottom. That will be executed *after* the authentication, validation, throttling, and quota-limiting middleware has been executed, just before the request is proxied upstream. Use middleware in this section to post-process a request before sending it to your upstream API. + +- `post[].name`: The name of the middleware object to call. This is case sensitive, and **must** match the name of the middleware object that was created, so in our example - we created `sampleMiddleware` by calling: + + `var sampleMiddleware = new TykJS.TykMiddleware.NewMiddleware({});` + +- `post[].path`: The path to the middleware component, this will be loaded into the JSVM when the API is initialised. This means that if you reload an API configuration, the middleware will also be re-loaded. You can hot-swap middleware on reload with no service interruption. + +- `post[].require_session`: Defaults to `false`, if you require access to the session object, it will be supplied as a `session` variable to your middleware processor function. + +## WAF (OSS) ModSecurity Plugin example + +Traditionally, a Web Application Firewall (WAF) would be the first layer requests would hit, before reaching the API gateway. This is not possible if the Gateway has to terminate SSL, for things such as mTLS. + +So what do you do if you still want to run your requests through a WAF to automatically scan for malicious action? We incorporate a WAF as part of the request lifecycle by using Tyk's plugin architecture. + +### Prerequisites + +* Already running Tyk - Community Edition or Pro +* Docker, to run the WAF + +### Disclaimer + +This is NOT a production ready plugin because + +* The JavaScript plugin creates a new connection with the WAF for every request +* The request is not sent over SSL +* The WAF is only sent the query params for inspection. 
+ +For higher performance, the plugin could be written in Golang, and a connection pool would be opened and maintained over SSL + +### Steps for Configuration + +1. **Turn JSVM on your `tyk.conf` at the root level:** + + Turn on JSVM interpreter to allow Tyk to run JavaScript plugins. + + ``` + "enable_jsvm": true + ``` + +2. **Place the JavaScript plugin on Tyk file system** + + Copy the JS Plugin as a local .js file to the Gateway's file system. + + From the Gateway root, this will download the plugin called `waf.js` into the `middleware` directory: + ``` + curl https://raw.githubusercontent.com/TykTechnologies/custom-plugins/master/plugins/js-pre-post-waf/waf.js | cat > middleware/waf.js + ``` + + (Instructions) + If you are running Tyk in Docker, you can get into Tyk Gateway with `docker exec` + ``` + $ docker ps | grep gateway + 670039a3e0b8 tykio/tyk-gateway:latest "./entrypoint.sh" 14 minutes ago Up 14 minutes 0.0.0.0:8080->8080/tcp tyk-demo_tyk-gateway_1 + + ## copy container name or ID + $ docker exec -it 670039a3e0b8 bash + + ## Now SSH'd into Tyk Gateway container and can perform curl + root@670039a3e0b8:/opt/tyk-gateway# ls + + apps entrypoint.sh install middleware templates tyk-gateway.pid tyk.conf.example + coprocess event_handlers js policies tyk tyk.conf utils + + ## Download the plugin + root@670039a3e0b8:/opt/tyk-gateway# curl https://raw.githubusercontent.com/TykTechnologies/custom-plugins/master/plugins/js-pre-post-waf/waf.js | cat > middleware/waf.js + + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 100 1125 100 1125 0 0 3906 0 --:--:-- --:--:-- --:--:-- 3975 + + ``` + + [waf.js source](https://raw.githubusercontent.com/TykTechnologies/custom-plugins/master/plugins/js-pre-post-waf/waf.js) + +3. **Import API definition into Tyk** + + Copy the following Tyk API definition and import it into your environment. 
+ + [API Definition JSON](https://raw.githubusercontent.com/TykTechnologies/custom-plugins/master/plugins/js-pre-post-waf/apidef.json) + + Here's the important section which adds the plugin to the request lifecycle for this API: + ```{.json} + "custom_middleware": { + "pre": [ + { + "name": "Waf", + "path": "./middleware/waf.js" + } + ], + ``` + + ##### How to Import? + [Tyk Self-Managed](/api-management/gateway-config-managing-classic#import-an-api) + + [Tyk OSS](/api-management/gateway-config-managing-classic#create-an-api) + +4. **Run WAF ModSecurity Using Docker** + + First run ModSecurity with the popular [Core RuleSet](https://coreruleset.org/) in Docker + ``` + $ docker run -ti -p 80:80 -e PARANOIA=1 --rm owasp/modsecurity-crs:v3.0 + ``` + + Open a second terminal and curl it + ``` + $ curl localhost + + hello world + ``` + + We should see the request show in the WAF server: + + ``` + 172.17.0.1 - - [30/Jun/2020:00:56:42 +0000] "GET / HTTP/1.1" 200 12 + ``` + + Now try a dirty payload: + ``` + $ curl 'localhost/?param=">' + + + + 403 Forbidden + +

Forbidden

+

You don't have permission to access / + on this server.
+

+ + ``` + + Our WAF catches the response and returns a `403`. + + + Now we try through Tyk. + + ``` + ## Clean requests, should get response from upstream's IP endpoint + $ curl localhost:8080/waf/ip + + { + "origin": "172.30.0.1, 147.253.129.30" + } + + ## WAF will detect malicious payload and instruct Tyk to deny + $ curl 'localhost:8080/waf/ip?param="> + { + "error": "Bad request!" + } + ``` diff --git a/api-management/plugins/overview.mdx b/api-management/plugins/overview.mdx new file mode 100644 index 000000000..6ee91498b --- /dev/null +++ b/api-management/plugins/overview.mdx @@ -0,0 +1,1203 @@ +--- +title: "Custom Plugins" +description: "Learn how to extend Tyk's capabilities using custom plugins to enhance API functionality." +sidebarTitle: "Overview" +--- + +import { ResponsiveGrid } from '/snippets/ResponsiveGrid.mdx'; + +## Introduction + +Plugins can be used to customize and enhance the capabilities of your APIs through integration with external services and databases to perform operations such as data transformation, custom authentication, logging and monitoring etc. + +When Tyk receives an API request, it works through a [chain](/api-management/traffic-transformation#request-middleware-chain) of processing *middleware* that is configured using the API definition. There are a large number of built-in middleware in the processing chain that are dedicated to performing [client authentication](/api-management/client-authentication), [request transformation](/api-management/traffic-transformation), [caching](/api-management/response-caching) and many other processes before proxying the request to the upstream. + +Tyk's custom plugin facility provides a powerful and flexible way to extend the middleware chain. It allows API developers to write custom middleware, in various programming languages, that can perform additional processing of requests and responses. 
+ +For example, a custom authentication scheme can be implemented and executed on API requests, custom plugins can be used to provide integration with external services and databases, or additional processing can be performed on the response returned from the upstream. + +There are several different stages of the [API request lifecycle](/api-management/traffic-transformation#request-middleware-chain) where custom plugins can be attached (or *hooked*) into the middleware chain allowing significant customization to meet your specific requirements. + +Custom plugins are usually referred to by the location where they can be *hooked* into the middleware processing chain as follows: + +1. [Pre (Request)](/api-management/plugins/plugin-types#request-plugins) +2. [Authentication](/api-management/plugins/plugin-types#authentication-plugins) +3. [Post-Auth (Request)](/api-management/plugins/plugin-types#request-plugins) +4. [Post (Request)](/api-management/plugins/plugin-types#request-plugins) +5. [Response](/api-management/plugins/plugin-types#response-plugins) +6. [Analytics (Response)](/api-management/plugins/plugin-types#analytics-plugins) + +## How Plugin Works + +The diagram below illustrates a high level architectural overview for how Tyk Gateway interacts with plugins. + +plugins overview + +From the above illustration it can be seen that: + +1. The client sends a request to an API served by Tyk Gateway. +2. Tyk processes the request and forwards it to one or more plugins implemented and configured for that API. +3. A plugin performs operations (e.g., custom authentication, data transformation). +4. The processed request is then returned to Tyk Gateway, which forwards it upstream. +5. Finally, the upstream response is sent back to the client. + +## Types of Plugin + +Tyk supports four types of plugins: + +1. **[Request Plugin](/api-management/plugins/plugin-types#request-plugins)** +2. 
**[Authentication Plugin](/api-management/plugins/plugin-types#authentication-plugins)** +3. **[Response Plugin](/api-management/plugins/plugin-types#response-plugins)** +4. **[Analytics Plugin](/api-management/plugins/plugin-types#analytics-plugins)** + +To know more about plugin types and it's advanced configuration, refer the following [docs](/api-management/plugins/plugin-types). + +## Getting Started + +This section takes you through the process of running and building a quickstart **Go plugin**, included within Tyk's [getting started](https://github.com/TykTechnologies/custom-go-plugin) repository. Go plugins are the recommended plugin type and suitable for most use cases. + +### Expected outcome + +At the end of this process you should have a Tyk Gateway or Tyk Self-Managed environment running locally, with a simple Go plugin executing on each API request. For each reponse to an API request the example plugin will inject a *Foo* header, with a value of *Bar*. + +### Prerequisites + +- [Docker](https://docs.docker.com/get-docker/) +- [Docker-compose](https://docs.docker.com/compose/install/) +- [Tyk license](https://tyk.io/sign-up/#self) (if using Self-Managed Tyk, which will make the process easier via UI) +- [Make](https://www.gnu.org/software/make) +- OSX (Intel) -> Not a prerequisite, though these steps are tested on OSX Intel/ARM + +### Before you begin + +Please clone the [getting started](https://github.com/TykTechnologies/custom-go-plugin) respository. + +```bash +git clone https://github.com/TykTechnologies/custom-go-plugin +cd custom-go-plugin +``` + +### Choose your environment + + + + +**Read time: 15 mins** + +Dashboard Tutorial + + + +**Read time: 15 mins** + +Tyk OSS Gateway Tutorial + + + + + +### Dashboard Plugins + +This quick start explains how to run the [getting started](https://github.com/TykTechnologies/custom-go-plugin) Go plugin within Tyk Dashboard. + +**Estimated time**: 10-15 minutes + +In this tutorial you will learn how to: + +1. 
Add your Tyk license. +2. Bootstrap the Tyk Dashboard environment. +3. Login to Tyk Dashboard. +4. View the pre-configured API. +5. Test the plugin. +6. View the analytics. +7. Next steps. + +**Steps for Configuration:** + +1. **Add your Tyk license** + + Create and edit the file `.env` with your Tyk Dashboard license key + + ```console + # Make a copy of the example .env file for the Tyk-Dashboard + cp .env.example .env + ``` + +2. **Bootstrap the getting started example** + + run the `make` command: + + ```bash + make + ``` + + This will take a few minutes to run as it compiles the plugin for the first time and downloads all the necessary Docker images. + +3. **Log in to Tyk Dashboard** + + Log on to the Tyk Dashboard on `http://localhost:3000` using the following Bootstrapped credentials: + ``` + demo@tyk.io + ``` + and password: + ``` + topsecretpassword + ``` + + Note: these are editable in `.env.example` + +4. **View the pre-configured API** + + Once you're logged on to the Tyk Dashboard, navigate to the *APIs* screen. + + You'll see a sample *Httpbin* API. Let's click into it for more details. + + Click on *VIEW RAW DEFINITION*. Note the *custom_middleware* block is filled out, injecting the compiled example Go plugin into the API. + +5. **Test the plugin** + + Let's send an API request to the API Gateway so it can reverse proxy to our API. + + ```terminal + curl localhost:8080/httpbin/get + ``` + + Yields the response: + ``` + { + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Foo": "Bar", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-63f78c47-51e22c5b57b8576b1225984a" + }, + "origin": "172.26.0.1, 99.242.70.243", + "url": "http://httpbin.org/get" + } + ``` + + Note, we see a *Foo:Bar* HTTP Header was injected by our Go plugin and echoed back to us by the Httpbin mock server. + +6. 
**View the analytics** + + Navigate to the Dashboard's various *API Usage Data* to view analytics on the API request! + +### Open-Source Plugins + +This quick start guide will explain how to run the [getting started](https://github.com/TykTechnologies/custom-go-plugin) Go plugin using the Tyk OSS Gateway. + +**Steps for Configuration:** + +1. **Bootstrap the getting started example** + + Please run the following command from within your newly cloned directory to run the Tyk Stack and compile the sample plugin. This will take a few minutes as we have to download all the necessary dependencies and docker images. + + ```bash + make up-oss && make build + ``` + +2. **Test the plugin** + + Let's test the plugin by sending an API request to the pre-configured API definition: + + ``` + curl localhost:8080/httpbin/get + ``` + + Response: + ``` + { + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Foo": "Bar", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1" + }, + "origin": "172.28.0.1, 99.242.70.243", + "url": "http://httpbin.org/get" + } + ``` + + We've sent an API request to the Gateway. We can see that the sample custom plugin has injected an HTTP header with a value of *Foo:Bar*. This header was echoed back in the Response Body via the mock Httpbin server. + + The `./tyk/scripts/bootstrap-oss.sh` script creates an API definition that includes the custom plugin. + +3. **View the analytics** + + We can see that Tyk Pump is running in the background. Let's check the logs after sending the API request: + + ``` + docker logs custom-go-plugin_tyk-pump_1 + ``` + + Output: + ``` + time="Feb 23 16:29:27" level=info msg="Purged 1 records..." 
prefix=stdout-pump + {"level":"info","msg":"","time":"0001-01-01T00:00:00Z","tyk-analytics-record":{"method":"GET","host":"httpbin.org","path":"/get","raw_path":"/get","content_length":0,"user_agent":"curl/7.79.1","day":23,"month":2,"year":2023,"hour":16,"response_code":200,"api_key":"00000000","timestamp":"2023-02-23T16:29:27.53328605Z","api_version":"Non Versioned","api_name":"httpbin","api_id":"845b8ed1ae964ea5a6eccab6abf3f3de","org_id":"","oauth_id":"","request_time":1128,"raw_request":"...","raw_response":"...","ip_address":"192.168.0.1","geo":{"country":{"iso_code":""},"city":{"geoname_id":0,"names":null},"location":{"latitude":0,"longitude":0,"time_zone":""}},"network":{"open_connections":0,"closed_connections":0,"bytes_in":0,"bytes_out":0},"latency":{"total":1128,"upstream":1111},"tags":["key-00000000","api-845b8ed1ae964ea5a6eccab6abf3f3de"],"alias":"","track_path":false,"expireAt":"2023-03-02T16:29:27.54271855Z","api_schema":""}} + ``` + + As we can see, when we send API requests, the Tyk Pump will scrape them from Redis and then send them to a persistent store as configured in the Tyk Pump env file. + + In this example, we've configured a simple `STDOUT` Pump where the records will be printed to the Standard OUT (docker logs!) + +## API Configuration + +This page provides an overview on how to register one or more custom plugins to be executed at different stages or [hooks](/api-management/plugins/plugin-types#plugin-and-hook-types) in the API request/response lifecycle. If you wish to learn how to register custom plugins to be executed on the traffic logs generated by the Gateway please refer to the [analytics plugins](/api-management/plugins/plugin-types#analytics-plugins) page. + +If you need fine-grained control at the endpoint level then it is also possible to configure [per-endpoint plugins](/api-management/plugins/plugin-types#per-endpoint-custom-plugins). 
These are custom Golang plugins that are triggered at the end of the request processing chain before API-level *Post* plugins are executed. + +--- + +### Introduction + +There are three locations where Tyk Gateway can find plugin functions: + +1. **gRPC plugins**: Plugin functions are implemented by a gRPC server with the associated configuration specified with the API definition. For further details on how to configure gRPC plugins, please refer to our [gRPC](/api-management/plugins/rich-plugins#overview-1) documentation. +2. **Local plugins**: Plugins are implemented by functions within source code files located on the Gateway's file system. The API Definition allows the source code file path and function name to be configured for each plugin. For further details read on. +3. **Plugin bundles**: The plugin source code and configuration are bundled into a zip file that is served by a remote web server. For further details see the [plugin bundles](/api-management/plugins/overview#plugin-bundles) page. + +### Plugin configuration + +Each plugin for an API can be configured within the API Definition with the following details: + +| Property | Description | +| :------- | :------------- | +| `Enabled` | When true, the plugin is activated | +| `Name` | A name used to identify the plugin | +| `Path` | The path to the source code file on the Tyk Gateway file system | +| `Function name` | The name of the function that implements the plugin. The function should exist within the source code file referenced in `path` | +| `Raw body only` | When set to true, this flag indicates that only the raw request body should be processed | +| `Require session state`| When set to true, Tyk Gateway will serialize the request session state and pass it as an argument to the function that implements the plugin in the target language. 
This is applicable to Post, Response, and Authentication hooks only | + +--- + +### Language configuration + +For local and bundle plugins a [plugin driver](/api-management/plugins/overview#plugin-driver-names) is configured to specify the plugin implementation language. If using gRPC plugins a `grpc` plugin driver should be used to instruct Tyk to request execution of plugins from within a gRPC server that is external to the Tyk process. This offers additional language support since Tyk can integrate with a gRPC server that is implemented using any supported [gRPC language](https://grpc.io/docs/). + +For a given API it is not possible to mix the implementation language for the plugin types: Pre, Authentication, Post, Post Authentication and Response plugins. For example, it is not possible to implement a pre request plugin in *Go* and also implement a post request plugin in *Python* for the same API. + +### Tyk OAS APIs + +An API can be configured so that one or more of its associated plugins can execute at different phases of the request / response life cycle. Each plugin configuration serves to identify the plugin source file path and the name of the corresponding function, triggered at each request / response lifecycle stage. + +This guide explains how to configure plugins for Tyk OAS APIs within the [Tyk OAS API definition](#tyk-oas-apidef) or via the [API designer](#tyk-oas-dashboard) in Tyk Dashboard. + +If you’re using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/plugins/overview#tyk-classic-apis) page. + +#### Using API Definition + + +The `x-tyk-api-gateway.middleware.global` section is used to configure plugins in a Tyk OAS API. It contains a `pluginConfig` section and a list of plugins for each phase of the API request / response lifecycle. 
+ +The `pluginConfig` section contains the `driver` parameter that is used to configure the plugin implementation [language](/api-management/plugins/overview#plugin-driver-names): + +```yaml +"pluginConfig": { + "driver": "goplugin" +} +``` + +Within the `x-tyk-api-gateway.middleware.global` section, keyed lists of plugins can be configured for each phase of the API request / response lifecycle described in the table below: + +| Phase | Description | Config Key | +| :----- | :--- | :---- | +| Pre | Executed at the start of the request processing chain | `prePlugins` | +| Post Auth | Executed after the requester has been authenticated | `postAuthenticationPlugins` | +| Post | Executed at the end of the request processing chain | `postPlugins` | +| Response | Occurs after the main request processing but before the response is sent. | `responsePlugins` | + +Each plugin configuration can have the following fields configured: + +- `enabled`: When true, enables the plugin. +- `functionName`: The name of the function that implements the plugin within the source file. +- `path`: The path to the plugin source file. +- `rawBodyOnly`: When true, indicates that only the raw body should be processed. +- `requireSession`: When true, indicates that session metadata will be available to the plugin. This is applicable only for post, post authentication and response plugins. 
+ +For example a Post Authentication plugin would be configured within a `postAuthenticationPlugins` list as shown below: + +```yaml +"postAuthenticationPlugins": [ + { + "enabled": true, + "functionName": "post_authentication_func", + "path": "/path/to/plugin1.so", + "rawBodyOnly": true, + "requireSession": true + } +] +``` + +An full example is given below to illustrate how to set up plugins for different phases of the request / response lifecycle: + +```json {linenos=true, linenostart=1, hl_lines=["15-52"]} +{ + "x-tyk-api-gateway": { + "info": { + "dbId": "667962397f6de50001508ac4", + "id": "b4d8ac6e5a274d7c7959d069b47dc206", + "orgId": "6672f4377f6de50001508abf", + "name": "OAS APIs Plugins", + "state": { + "active": true, + "internal": false + } + }, + "middleware": { + "global": { + "pluginConfig": { + "driver": "goplugin" + }, + "postAuthenticationPlugins": [ + { + "enabled": true, + "functionName": "post_authentication_func", + "path": "/path/to/plugin1.so", + "rawBodyOnly": true, + "requireSession": true + } + ], + "postPlugins": [ + { + "enabled": true, + "functionName": "postplugin", + "path": "/path/to/plugin1.so", + "rawBodyOnly": true, + "requireSession": true + } + ], + "prePlugins": [ + { + "enabled": true, + "functionName": "pre-plugin", + "path": "/path/to/plugin1.so" + } + ], + "responsePlugins": [ + { + "enabled": true, + "functionName": "Response", + "path": "/path/to/plugin1.so", + "rawBodyOnly": true, + "requireSession": true + } + ] + } + } + } +} +``` + +In this example we can see that the plugin driver has been configured by setting the `driver` field to `goplugin` within the `pluginConfig` object. This configuration instructs Tyk Gateway that our plugins are implemented using Golang. + +We can also see that the following type of plugins are configured: + +- **Pre**: A plugin is configured within the `prePlugins` list. 
The plugin is enabled and implemented by function `pre-plugin` within the source file located at path `/path/to/plugin1.so`. +- **Post Authentication**: A plugin is configured within the `postAuthenticationPlugins` list. The plugin is enabled and implemented by function `post_authentication_func` within the source file located at path `/path/to/plugin1.so`. The raw request body and session metadata is available to the plugin. +- **Post**: A plugin is configured within the `postPlugins` list. The plugin is enabled and implemented by function `postplugin` within the source file located at path `/path/to/plugin1.so`. The raw request body and session metadata is available to the plugin. +- **Response**: A plugin is configured within the `responsePlugins` list. The plugin is enabled and implemented by function `Response` within the source file located at path `/path/to/plugin1.so`. The raw request body and session metadata is available to the plugin. + +The configuration above is a complete and valid Tyk OAS API Definition that you can use as a basis for trying out custom plugins. You will need to update the [driver](/api-management/plugins/overview#plugin-driver-names) parameter to reflect the target language type of your plugins. You will also need to update the `path` and `functionName` parameters for each plugin to reflect the source code. + +#### Using API Designer + + +Select your API from the list of *Created APIs* to reach the API designer and then follow these steps: + +1. **Configure plugin type and custom data** + + In the *Plugins Configuration* section, select the *Plugin Driver*, which tells Tyk which type of plugin to expect: Go, gRPC, JavaScript (OTTO), Lua or Python. + + You can configure custom data that will be made available to your plugin function as a JSON formatted object in the *Config Data* option. + + OAS API Plugins Driver Config + +2. 
**Configure the custom plugins** + + For each plugin that you wish to register with the API, click on the **Add Plugin** button to display a plugin configuration section: + + OAS Plugins Config Section + + Complete the following fields: + + - `Function Name`: Enter the name of the function within your plugin code that Tyk should invoke. + - `Path`: Enter the path to the source file that contains the function that implements your plugin. + - `Raw Body Only`: Optionally, toggle the *Raw Body Only* switch to true when you do not wish to fill body in request or response object for your plugins. + +3. **Save the API** + + Select **Save API** to apply the changes to your API. + +### Tyk Classic APIs + +An API can be configured so that one or more of its associated plugins can execute at different phases of the request / response lifecycle. Each plugin configuration serves to identify the plugin source file path and the name of the corresponding function, triggered at each request / response lifecycle stage. + +This guide explains how to configure plugins for Tyk Classic APIs within the [Tyk Classic API definition](#tyk-classic-apidef) or via the [API designer](#tyk-classic-dashboard) in Tyk Dashboard. + +If you’re using the newer Tyk OAS APIs, then check out the [Tyk OAS](/api-management/plugins/overview#tyk-oas-apis) page. + +#### Using API Definition + + +In Tyk Classic APIs, the *custom_middleware* section of the Tyk Classic API Definition is where you configure plugins that will run at different points during the lifecycle of an API request. 
+ +This table illustrates the different phases of the API request lifecycle where custom plugins can be executed: + +| Phase | Description | Config | +| :----- | :--- | :---- | +| Pre | Executed at the start of the request processing chain | `pre` | +| Auth | Executed during the authentication step | `auth_check` | +| Post Auth | Executed after the requester has been authenticated | `post_key_auth` | +| Post | Executed at the end of the request processing chain | `post` | +| Response | Executed on the response received from the upstream | `response` | + +This example configuration illustrates how to set up plugins for different phases of the request lifecycle: + +```json {linenos=true, linenostart=1} +{ + "custom_middleware": { + "pre": [ + { + "name": "PreHook1", + "path": "/path/to/plugin1.so", + "disabled": false, + "require_session": false, + "raw_body_only": false + } + ], + "auth_check": { + "name": "AuthCheck", + "path": "/path/to/plugin.so", + "disabled": false, + "require_session": false, + "raw_body_only": false + }, + "post_key_auth": [ + { + "name": "PostKeyAuth", + "path": "/path/to/plugin.so", + "disabled": false, + "require_session": false, + "raw_body_only": false + } + ], + "post": [ + { + "name": "PostHook1", + "path": "/path/to/plugin1.so", + "disabled": false, + "require_session": false, + "raw_body_only": false + }, + { + "name": "PostHook2", + "path": "/path/to/plugin2.so", + "disabled": false, + "require_session": false, + "raw_body_only": false + } + ], + "response": [ + { + "name": "ResponseHook", + "path": "/path/to/plugin.so", + "disabled": false, + "require_session": false, + "raw_body_only": false + } + ], + "driver": "goplugin" + } +} +``` + +In this example we can see that there are Golang custom authentication (`auth_check`), post authentication (`post_key_auth`), post, pre and response plugins configured. 
+ +It can be seen that each plugin is configured with the specific function name and associated source file path of the file that contains the function. Furthermore, each lifecycle phase (except `auth`) can have a list of plugins configured, allowing for complex processing workflows. For example, you might develop one plugin for logging and another for modifying the request in the pre request phase. When multiple plugins are configured for a phase they will be executed in the order that they appear in the API definition. + +The `driver` configuration parameter describes the plugin implementation language. Please refer to the [supported languages](/api-management/plugins/overview#plugin-driver-names) section for list of supported plugin driver names. + +Each plugin can have additional settings, such as: +- `disabled`: When true, disables the plugin. +- `raw_body_only`: When true, indicates that only the raw body should be processed. +- `require_session`: When true, indicates that session metadata will be available to the plugin. This is applicable only for post, post authentication and response plugins. + +#### Using API Designer + + +This section explains how to configure plugins for a Tyk Classic API using Tyk Dashboard. It specifically covers the use case where the source files of your plugins are deployed on the Tyk Gateway file system. + +Select your API from the list of *Created APIs* to reach the API designer and then follow these steps: + +Plugins Classic API screen + +1. **Display the Tyk Classic API Definition editor** + + Click on the **View Raw Definition** button to display an editor for updating the Tyk Classic API Definition. + + Plugins Classic API Definition editor screen + +2. **Edit the Tyk Classic API Definition to configure plugins** + + Use the editor to edit the `custom_middleware` section of the [Tyk Classic API Definition](/api-management/plugins/overview#tyk-classic-apis). + + Plugins Classic API Bundle Field + +3. 
**Save changes** + + Select the **Update** button to apply your changes to the Tyk Classic API Definition. + +## Plugin Deployment Types + +There are a variety of scenarios relating to the deployment of plugins for an API, concerning the location of the plugin source code and its associated configuration. + +### Local Plugins + +The plugin source code and associated configuration are co-located with Tyk Gateway in the same file system. The configuration is located within the API Definition. For further details please consult [API configuration](/api-management/plugins/overview#api-configuration). + +### Plugin Bundles (Remote) + +The plugin source code and associated configuration are bundled into a zip file and uploaded to a remote webserver. Multiple plugins can be stored in a single *plugin bundle*. Tyk Gateway will download the plugin bundle from the remote webserver and then extract, cache and execute plugins for each of the configured phases of the API request / response lifecycle. For further details on plugin bundles and how to configure them, please refer to the [plugin bundles](/api-management/plugins/overview#plugin-bundles) page. + +### gRPC Plugins (Remote) + +Custom plugins can be hosted on a remote server and executed from the Tyk Gateway middleware chain via gRPC. These plugins can be written in any language you prefer, as they are executed on the gRPC server. You'll configure your API definition so that Tyk Gateway will send requests to your gRPC server at the appropriate points in the API request / response lifecycle. For further details please consult our [gRPC](/api-management/plugins/rich-plugins#overview-1) documentation. + +## Plugin Bundles + +For Tyk Gateway to execute local custom plugins during the processing of API requests and responses, the plugin source code must be loaded into the Gateway. 
The source is usually stored in files and the API definition is used to point the Gateway at the correct file for each [plugin type](/api-management/plugins/plugin-types#plugin-types). To simplify the management of plugins, you can group (or *bundle*) multiple plugin files together in a ZIP file that is referred to as a *plugin bundle*. + +### When To Use Plugin Bundles + +Plugin bundles are intended to simplify the process of attaching and loading custom middleware. Multiple API definitions can refer to the same plugin bundle (containing the source code and configuration) if required. Having this common, shared resource avoids you from having to duplicate plugin configuration for each of your APIs definitions. + +### How Plugin Bundles Work + +The source code and a [manifest file](#manifest) are bundled into a zip file and uploaded to an external remote web server. The manifest file references the source code file path and the function name within the code that should be invoked for each [plugin type](/api-management/plugins/plugin-types#plugin-types). Within the API definition, custom plugins are configured simply using the name of the bundle (zip file). Tyk Gateway downloads, caches, extracts and executes plugins from the downloaded bundle according to the configuration in the manifest file. + +plugin bundles architectural overview + +#### Caching plugin bundles + +Tyk downloads a plugin bundle on startup based on the configuration in the API definition, e.g. `http://my-bundle-server.com/bundles/bundle-latest.zip`. The bundle contents will be cached so that, when a Tyk reload event occurs, the Gateway does not have to retrieve the bundle from the server again each time. If you want to use a different bundle then you must update your API to retrieve a different bundle filename and then trigger a reload. 
It is not sufficient simply to replace the bundle file on your server with an updated version with the same name - the caching ensures this will not be retrieved during a reload event. + +As a suggestion, you may organize your plugin bundle files using a Git commit reference or version number, e.g. `bundle-e5e6044.zip`, `bundle-48714c8.zip`, `bundle-1.0.0.zip`, `bundle-1.0.1.zip`, etc. + +Alternatively, you may delete the cached bundle from Tyk manually and then trigger a hot reload to tell Tyk to fetch a new one. By default, Tyk will store downloaded bundles in this path: +`{ TYK_ROOT } / { CONFIG_MIDDLEWARE_PATH } / bundles` + +#### Gateway configuration + +To configure Tyk Gateway to load plugin bundles the following parameters must be specified in your `tyk.conf`: + +```yaml +"enable_bundle_downloader": true, +"bundle_base_url": "http://my-bundle-server.com/bundles/", +"public_key_path": "/path/to/my/pubkey", +``` + +- `enable_bundle_downloader`: Enables the bundle downloader. +- `bundle_base_url`: A base URL that will be used to download the bundle. For example if we have `bundle-latest.zip` specified in the API definition, Tyk will fetch the following file: `http://my-bundle-server.com/bundles/bundle-latest.zip` (see the next section for details). +- `public_key_path`: Sets a public key, used for verifying signed bundles. If unsigned bundles are used you may omit this. + + + + + Remember to set `"enable_coprocess": true` in your `tyk.conf` when using [rich plugins](/api-management/plugins/overview#plugin-bundles)! + + + +#### The manifest file + + +A plugin bundle must include a manifest file (called `manifest.json`). The manifest file contains important information like the configuration block and the list of source code files that will be included as part of the bundle file. If a file isn't specified in the list, it won't be included in the resulting file, even if it's present in the current directory. 
+ +A sample manifest file looks like this: + +```json +{ + "file_list": [ + "middleware.py", + "mylib.py" + ], + "custom_middleware": { + "pre": [ + { + "name": "PreMiddleware" + } + ], + "post": [ + { + "name": "PostMiddleware" + } + ], + "driver": "python" + }, + "checksum": "", + "signature": "" +} +``` + +You may leave the `checksum` and `signature` fields empty, the bundler tool will fill these during the build process. + +The `custom_middleware` block follows the standard syntax we use for Tyk plugins. In Tyk Community Edition, where file-based API configuration is used by default, a `custom_middleware` block is located/added to the API configuration file. + +#### Creating plugin bundles + +Tyk provides the Bundle CLI tool as part of the `tyk` binary. For further details please visit the [Bundle CLI tool](/api-management/plugins/overview#bundler-cli-tool) page. + +### Tyk OAS API Configuration + +For API plugins that are deployed as [plugin bundles](/api-management/plugins/overview#plugin-bundles), the API should be configured with the name of the plugin bundle file to download from your remote web server. Furthermore, the Gateway should be [configured](/api-management/plugins/overview#gateway-configuration) to enable downloading plugin bundles. + +You can configure your API with the name of the plugin bundle file to download within the Tyk OAS API definition or API Designer. + +If you’re using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/plugins/overview#tyk-classic-apis) page. + +#### Using API Definition + +The configuration for a Tyk OAS API to fetch the download of a plugin bundle from a remote web server is encapsulated within the `pluginConfig` section within the `middleware.global` section of the `x-tyk-api-gateway` part of a Tyk OAS API Definition. 
+ +The `pluginConfig` section is structured as follows: + +- `bundle`: A JSON entity that contains the following configuration parameters: + - `enabled`: When `true`, enables the plugin. + - `path`: The relative path of the zip file in relation to the base URL configured on the remote webserver that hosts plugin bundles. +- `driver`: Indicates the type of plugin, e.g. `golang`, `grpc`, `lua`, `otto` or `python`. + +An illustrative example is listed below: + +```json{hl_lines=["37-45"], linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-oas-plugin-configuration", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "put": { + "operationId": "anythingput", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-oas-plugin-configuration", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-oas-plugin-configuration/", + "strip": true + } + }, + "middleware": { + "global": { + "pluginConfig": { + "bundle": { + "enabled": true, + "path": "plugin.zip" + }, + "driver": "goplugin" + } + } + } + } +} +``` + +In this example we can see that bundle plugin has been configured within the `middleware.global.pluginConfig.bundle` object. The plugin is enabled and bundled within file `plugin.zip`. The plugin bundle is a Go plugin, i.e. `middleware.global.pluginConfig.driver` has been configured with value `goplugin`. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out custom plugin bundles, assuming that you have provided a valid bundle file named `plugin.zip`. + +#### Using API Designer + +To configure plugin bundles for Tyk OAS APIs click on the APIs menu item in the *API Management* menu of Dashboard and select your API to display the editor screen. Subsequently, follow the steps below: + +1. 
**Access plugin options** + + Scroll down until the *Enable Plugin* section is displayed. + + Tyk OAS API Bundle section + +2. **Enable plugin bundle for you API** + + Enable a plugin bundle for your API by activating the toggle switch. + +3. **Enter relative path to plugin bundle file** + + Enter the relative path of the plugin bundle file in the *Plugin Bundle ID* field that Tyk Gateway should download from the web server that hosts your plugin bundles. + +4. **Save the API** + + Select **Save API** to apply the changes to your API. + +### Tyk Classic API Configuration + +For custom plugins that are deployed as [plugin bundles](/api-management/plugins/overview#plugin-bundles), the API should be configured with the name of the plugin bundle file to download from your remote web server. Furthermore, the Gateway should be [configured](/api-management/plugins/overview#gateway-configuration) to enable downloading plugin bundles. + +You can configure your API with the name of the plugin bundle file to download within the Tyk Classic API definition or API Designer. + +If you’re using the newer Tyk OAS APIs, then check out the [Tyk OAS](/api-management/plugins/overview#tyk-oas-api-configuration) page. + +#### Using API Definition + +The configuration for an API to fetch and download a plugin bundle from a remote server is encapsulated within the `custom_middleware_bundle` field of the Tyk Classic API Definition. 
An illustrative example is listed below: + +```json {hl_lines=["33"], linenos=true, linenostart=1} +{ + "name": "Tyk Classic Bundle API", + "api_id": "1", + "org_id": "default", + "definition": { + "location": "header", + "key": "version" + }, + "auth": { + "auth_header_name": "authorization" + }, + "use_keyless": true, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "expires": "3000-01-02 15:04", + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [] + } + } + } + }, + "proxy": { + "listen_path": "/quickstart/", + "target_url": "http://httpbin.org", + "strip_listen_path": true + }, + "custom_middleware_bundle": "bundle-latest.zip" +} +``` + +With the configuration given in the example above, calls to the API will invoke the custom plugins defined in the `manifest.json` file contained within `bundle-latest.zip` uploaded to your remote webserver, e.g. `http://your-example-plugin-server.com/plugins`. + +Tyk Gateway should be configured for downloading plugin bundles from a secured web server. Please consult the [plugin bundles](/api-management/plugins/overview#plugin-bundles) documentation for further details. + +#### Using API Designer + +To configure plugin bundles for Tyk Classic APIs click on the APIs menu item in the *API Management* menu of Dashboard and select your API to display the API editor screen. Subsequently, follow the steps below: + +1. **Access plugin options** + + Click on the *Advanced Options* tab and scroll down until the *Plugin Options* section is displayed. + + Tyk Classic Plugin Options section + +2. **Enter relative path to bundle file** + + Enter the relative path of the plugin bundle file in the *Plugin Bundle ID* field that Tyk Gateway should download from the web server hosting plugin bundles. + +3. **Save the API** + + Select the **save** or **update** button to apply the changes to your API. 
+ +### Bundler CLI Tool + +The bundler tool is a CLI service, provided by _Tyk Gateway_ as part of its binary since v2.8. This lets you generate +[plugin bundles](/api-management/plugins/overview#plugin-bundles). + + + +Generated plugin bundles must be served using your own web server. + + + +Issue the following command to see more details on the `bundle` command: + +```bash +/opt/tyk-gateway/bin/tyk bundle -h +``` + +--- + +#### Prerequisites + +To create plugin bundles you will need the following: + +- **Manifest.json**: The [manifest.json](/api-management/plugins/overview#manifest) file + contains the paths to the plugin source files and the name of the function implementing each plugin. The + _manifest.json_ file is mandatory and must exist on the Tyk Gateway file system. By default the bundle CLI looks for + a file named _manifest.json_ in the current working directory where the bundle command is run from. The exact location + can be specified using the `--manifest` command option. +- **Plugin source code files**: The plugin source code files should be contained relative to the directory in which the + _manifest.json_ file is located. The _manifest.json_ should contain relative path references to source code files. + + + +Source code files are not required when creating a plugin bundle for gRPC plugins since the plugin + source code is located at the gRPC server. + + + +- **Certificate key**: Plugin bundles can optionally be signed with an RSA private key. The corresponding public key + should be located in the file configured in environmental variable `TYK_GW_PUBLICKEYPATH` or the `public_key_path` + parameter in `tyk.conf`: + +```json +{ + "enable_bundle_downloader": true, + "bundle_base_url": "http://my-bundle-server.com/bundles/", + "public_key_path": "/path/to/my/pubkey.pem" +} +``` + +--- + +#### Directory Structure + +A suggested directory structure is shown below for Golang, Javascript and Python bundles in the tabs below. 
+ + + +Sub-directories (folders) are not supported inside the `bundle-directory` location. + + + + + +```bash +/bundle-directory +β”œβ”€β”€ manifest.json # Manifest file with plugin references +└── plugin.so # Compiled Golang plugin +``` + + + +```bash +/bundle-directory +β”œβ”€β”€ manifest.json # Manifest file with plugin references +β”œβ”€β”€ plugin1.js # First JavaScript plugin source file +└── plugin2.js # Second JavaScript plugin source file +``` + + + + + +```bash +/bundle-directory +β”œβ”€β”€ manifest.json # Manifest file with plugin references +β”œβ”€β”€ plugin1.py # First Python plugin source file +└── plugin2.py # Second Python plugin source file +``` + + + + + +The `manifest.json` will reference the files located in the `bundle-directory`, ensure plugin source files are organized relative to the manifest. The Tyk Gateway will load and execute these plugins based on the paths defined in the `manifest.json` file. + +Sample `manifest.json` is shown below for Golang, Javascript and Python bundles in the tabs below. 
+ + + +```json +{ + "file_list": [ + "plugin.so" + ], + "custom_middleware": { + "pre": [ + { + "name": "PreMiddleware", + "path": "./plugin.so" + } + ], + "post": [ + { + "name": "PostMiddleware", + "path": "./plugin.so" + } + ], + "driver": "goplugin" + }, + "checksum": "", + "signature": "" +} + +``` + + + +```json +{ + "file_list": [ + "plugin1.js", + "plugin2.js" + ], + "custom_middleware": { + "pre": [ + { + "name": "PreMiddleware", + "path": "./plugin1.js" + } + ], + "post": [ + { + "name": "PostMiddleware", + "path": "./plugin2.js" + } + ], + "driver": "otto" + }, + "checksum": "", + "signature": "" +} +``` + + + + + +```json +{ + "file_list": [ + "plugin1.py", + "plugin2.py" + ], + "custom_middleware": { + "pre": [ + { + "name": "PreMiddleware", + "path": "./plugin1.py" + } + ], + "post": [ + { + "name": "PostMiddleware", + "path": "./plugin2.py" + } + ], + "driver": "python" + }, + "checksum": "", + "signature": "" +} +``` + + + + + +--- + +#### Creating a plugin bundle + +Run the following command to create the bundle: + +```bash +$ tyk bundle build +``` + +The resulting file will contain all your specified files and a modified `manifest.json` with the checksum and signature +(if required) applied, in ZIP format. + +By default, Tyk will attempt to sign plugin bundles for improved security. If no private key is specified, the program +will prompt for a confirmation. Use `-y` to override this (see options below). + +--- + +#### Command Options + +Instructions on how to create plugin bundles is displayed by issuing the following command: + +```bash +/opt/tyk-gateway/bin/tyk bundle build -h +``` + +The following options are supported: + +- `--manifest`: Specifies the path to the manifest file. This defaults to `manifest.json` within the current working + directory. +- `--output`: Specifies the name of the bundle file e.g. `--output bundle-latest.zip`. If this flag is not specified, + `bundle.zip` will be used. 
+- `-y`: Force the tool to create an unsigned bundle without prompting e.g. `$ tyk bundle build --output bundle-latest.zip -y`. +- `--key`: Specifies the path to your private key which is used to generate a signed bundle e.g. + `$ tyk bundle build --output bundle-latest.zip --key=mykey.pem`. + +--- + +#### Docker Example + +Since v5.5 Tyk Gateway uses distroless docker images. + +For Gateway version < v5.5 it is possible to use Docker to create plugin bundles as shown in the example below. + +```bash +docker run --rm -it \ + --name bundler \ + -v `pwd`:/plugin-source \ + -v `pwd`/../../../confs/keys:/keys \ + -w /plugin-source \ + --entrypoint /bin/bash \ + tykio/tyk-gateway:v5.4.0 \ + -c 'export PATH="/opt/tyk-gateway:$PATH"; tyk bundle build -o bundle.zip -k /keys/key.pem' +``` + +This Docker command runs a container using the `tykio/tyk-gateway:v5.4.0` image to build a Tyk plugin bundle. It mounts +the current directory from the host as `/plugin-source` and a directory containing keys as `/keys` inside the container. +The working directory within the container is set to `/plugin-source`, and the default entrypoint is overridden to use +`/bin/bash`. The command executed in the container exports a modified `PATH` to include the Tyk Gateway binaries, then +runs `tyk bundle build` to generate a plugin bundle named `bundle.zip`, using the specified key to sign the bundle. The +container is automatically removed after the command completes, and the operation is conducted interactively. + +## Supported Languages + +The following languages are supported for custom plugins: +* [Golang](/api-management/plugins/golang#): A plugin written in Golang is called a **Native Plugin**. Tyk recommends using Go plugins for performance, flexibility, and nativity reasons (all Tyk components are written in Go). +* [JavaScript](/api-management/plugins/javascript#): A plugin written in Javascript uses the JavaScript Virtual Machine (JSVM) interpreter.
+* [Rich Plugins](/api-management/plugins/rich-plugins#) include Python, Lua, gRPC - With gRPC, you can write plugins in Java, .NET, C++ / C#, PHP, and all other [gRPC supported languages](https://grpc.io/docs/languages/). +Rich plugins give ultimate flexibility in the language of implementation, however, there are some performance and management overheads when compared with a native GoLang plugin. + +**Common To All Plugin Languages:** + +* Make Layer 4 (TCP) or Layer 7 (HTTP/REST/SOAP) calls +* Open Persistent Connections +* Modify the request in-flight +* Stop the request and return a [custom response](/api-management/plugins/plugin-types#return-overrides--returnoverrides) +* Be served using [Bundles](/api-management/plugins/overview#plugin-deployment-types) or by files on the file system, except gRPC of course which by definition is served by some webserver in the language of your choosing + +### Plugin Hook Types + +Tyk provides 5 different phases, i.e. hooks to inject custom plugins throughout the [API execution lifecycle](/api-management/traffic-transformation#request-middleware-chain). + +Not all hooks are supported in every language.
The following table shows which plugin languages support which phase/hook: + +| | Auth | Pre | Post-Auth | Post | Response +| :------------ | :-------- | :---------- | :----------- | :------ | :----------- | +| GoLang | βœ… |βœ… |βœ… |βœ… |βœ… +| JavaScript | ❌ |βœ… |❌ |βœ… |❌ +| gRPC | βœ… |βœ… |βœ… |βœ… |βœ… +| Python | βœ… |βœ… |βœ… |βœ… |βœ… +| Lua | βœ… |βœ… |βœ… |βœ… |❌ + +More reading on the [hook types](/api-management/plugins/rich-plugins#coprocess-dispatcher---hooks) in rich plugins and an explanation with common use cases for each [hook type](/api-management/plugins/plugin-types#plugin-types) + + +### Plugin Driver Names + +We use the following Plugin driver names: + +| Plugin | Name | +| :---------- | :--------- | +| GoLang | goplugin | +| JavaScript | otto | +| gRPC | grpc | +| Python | python | +| Lua | lua | + +### Limitations + +What are the limitations of using each programming language? + +| | GoLang | JavaScript | gRPC | Python | Lua +| :--- | :-------- | :------------------ | :----------- | :----------- | :----------- | +| Runs in Gateway process | βœ…
Runs
natively | βœ…
Built-In JSVM Interpreter | ❌
Standalone server | βœ…
Tyk talks with Python interpreter |βœ… +| Built-in SDK | βœ…
All Gateway Functionality | βœ…
[Yes](/api-management/plugins/javascript#javascript-api) | ❌ | βœ…
[Yes](/api-management/plugins/rich-plugins#tyk-python-api-methods) | ❌ +| TCP Connections

(DBs, Redis, etc)

| βœ… | ❌
Very Limited | βœ… | βœ… | βœ… | + +### Custom Plugin Table + +We have put together a [GitHub repo with a table of custom plugins](https://github.com/TykTechnologies/custom-plugins#custom-gateway-plugins) in various languages that you can experiment with. If you would like to submit one that you have developed, feel free to open an issue in the repo. + +### Differences between Rich Plugins and JSVM middleware + +#### JavaScript +The JavaScript Virtual Machine provides pluggable middleware that can modify a request on the fly and are designed to augment a running Tyk process, are easy to implement and run inside the Tyk process in a sandboxed *ECMAScript* interpreter. This is good, but there are some drawbacks with the JSVM: + +* **Performance**: JSVM is performant, but is not easy to optimize and is dependent on the [otto interpreter](https://github.com/robertkrimen/otto) - this is not ideal. The JSVM also requires a copy of the interpreter object for each request to be made, which can increase memory footprint. +* **Extensibility**: JSVM is a limited interpreter, although it can use some NPM modules, it isn't NodeJS so writing interoperable code (especially with other DBs) is difficult. +* **TCP Access**: The JSVM has no socket access so working with DB drivers and directly with Redis is not possible. + +#### Rich Plugins +Rich Plugins can provide replacements for existing middleware functions (as opposed to augmentation) and are designed to be full-blown, optimized, highly capable services. They enable a full customized architecture to be built that integrates with a user's infrastructure. + +Rich Plugins bring about the following improvements: + +* **Performance**: Run on STDIN (unix pipes), which are extremely fast and run in their own memory space, and so can be optimized for performance way beyond what the JSVM could offer. +* **Extensibility**: By allowing any language to be used so long as GRPC is supported, the extensibility of a CPH is completely open. 
+* **TCP Access**: Because a plugin is a separate process, it can have its own low-level TCP connections open to databases and services. + +## Plugin Caveats + +- Tyk Gateway manages plugins for each API within the same process. +- For [gRPC plugins](/api-management/plugins/rich-plugins#overview-1), Tyk Gateway can only be configured to integrate with one gRPC server. +- Javascript plugins only allow Pre and Post Request hooks of the API Request Lifecycle. + + +## Plugins Hub + +{/* Want to try and get a design layout setup for this that uses stylesheets from home page to offer similar layout */} + +Welcome to the Tyk Plugins Hub, dedicated to providing you with a curated list of resources that showcase how to develop Tyk Plugins. + +[Tyk Plugins](/api-management/plugins/overview#) are a powerful tool that allows you to develop custom middleware that can intercept requests at different stages of the request lifecycle, modifying/transforming headers and body content. + +Tyk has extensive support for writing custom plugins using a wide range of languages, most notably: Go, Python, Javascript etc. In fact, plugins can be developed using most languages via *gRPC*. + +### Blogs + +Selected blogs for plugin development are included below. Further examples are available at the Tyk [website](https://tyk.io/?s=plugin). + +1. **[Decoupling micro-services using Message-based RPC](https://medium.com/@asoorm/decoupling-micro-services-using-message-based-rpc-fa1c12409d8f)** + + - **Summary**: Explains how to write a plugin that intercepts an API request and forwards it to a gRPC server. The gRPC server processes the request and dispatches work to a RabbitMQ message queue. The source code is available in the accompanying [GitHub repository](https://github.com/asoorm/tyk-rmq-middleware) + +2.
**[How to configure a gRPC server using Tyk](https://tyk.io/blog/how-to-configure-a-grpc-server-using-tyk/)** + + - **Summary**: Explains how to configure a Python implementation of a gRPC server to add additional logic to API requests. During the request lifecycle, the Tyk-Gateway acts as a gRPC client that contacts the Python gRPC server, providing additional custom logic. + +3. **[How to deploy Python plugins in Tyk running On Kubernetes](https://tyk.io/blog/how-to-deploy-python-plugins-in-tyk-running-on-kubernetes/)** + + - **Summary**: Explains how to deploy a custom Python plugin into a Tyk installation running on a Kubernetes cluster. + +### GitHub Repositories + +Here are some carefully selected GitHub repositories that will help you learn how to integrate and utilize Tyk Plugins in your development projects: + +1. **[Tyk Awesome Plugins](https://github.com/TykTechnologies/tyk-awesome-plugins)** + + - **Description**: Index of plugins developed using a variety of languages. + - **Key Features Demonstrated**: A comprehensive index for a collection of plugins that can be used with the Tyk API Gateway in areas such as: rate limiting, authentication and request transformation. The examples are developed using a diverse array of languages, including but not limited to: Python, JavaScript and Go. This broad language support ensures that developers from different backgrounds and with various language preferences can seamlessly integrate these plugins with their Tyk API Gateway implementations. + +2. **[Custom Plugin Examples](https://github.com/TykTechnologies/custom-plugin-examples/tree/master)** + + - **Description**: Index of examples for a range of plugin hooks (Pre, Post, Post-Auth and Response) developed using a variety of languages. + - **Key Features Demonstrated**: Specific examples include invoking an AWS Lambda, inserting a new claim into a JWT, injecting a signed JWT into the authorization header, and modifying request headers.
A range of examples are available including Python, Java, Ruby, Javascript, NodeJS and Go. + +3. **[Environment For Plugin Development](https://github.com/TykTechnologies/custom-go-plugin)** + + - **Description**: Provides a docker-compose environment for developing your own custom Go plugins. + - **Key Features Demonstrated**: Showcases support for bundling plugins, uploading plugins to AWS S3 storage, test coverage etc. + + diff --git a/api-management/plugins/plugin-types.mdx b/api-management/plugins/plugin-types.mdx new file mode 100644 index 000000000..0722c9109 --- /dev/null +++ b/api-management/plugins/plugin-types.mdx @@ -0,0 +1,724 @@ +--- +title: "Plugin Types" +description: "How to manage users, teams, permissions, rbac in Tyk Dashboard" +keywords: "Dashboard, User Management, RBAC, Role Based Access Control, User Groups, Teams, Permissions, API Ownership, SSO, Single Sing On, Multi Tenancy" +sidebarTitle: "Plugin Types" +--- + +## Introduction + +Custom Plugins enable users to execute custom code to complete tasks specific to their use case, allowing users to complete tasks that would not otherwise be possible using Tyk’s standard middleware options. + +Tyk has a [pre-defined execution order](/api-management/traffic-transformation#request-middleware-chain) for the middleware which also includes **seven hooks** for the custom plugins. As such, users can execute, or `hook`, their plugin in these phases of the API request/response lifecycle based on their specific use case. 
+ +## Plugin and Hook Types +This table includes all the plugin types with the relevant hooks, their place in the execution chain, description and examples: + +| Hook Type (in their execution order) | Plugin Type | HTTP Request/Response phase | Executed before/after reverse proxy to the upstream API | Details | Common Use Cases | +|--------------------------|----|---|--------------|--------------------|--------- +| Pre (Request) | Request Plugin | HTTP request | Before | The first thing to be executed, before any middleware | IP Rate Limit plugins, API Request enrichment | +| Authentication| Authentication Plugin | HTTP request | Before | Replaces Tyk's authentication & authorization middleware with your own business logic | When you need your a custom flow, for example, interfacing with legacy Auth database | +| Post-Auth (Request)| Authentication Plugin | HTTP request | Before | Executed immediately after authentication middleware | Additional special custom authentication is needed | +| Post (Request)| Request Plugin | HTTP request| Before | The final middleware to be executed during the *HTTP request* phase (see **Note** below) | Update the request before it gets to the upstream, for example, adding a header that might override another header, so we add it at the end to ensure it doesn't get overridden | +| Response Plugin| Response Plugin | HTTP Response | After | Executed after the reverse proxy to the upstream API | Executed straight after the reverse proxy returns from the upstream API to Tyk | Change the response before the user gets it, for example, change `Location` header from internal to an external URL | +| Analytics Plugin (Request+Response)| Analytics Plugin | HTTP request | After | The final middleware to be executed during the *HTTP response* phase | Change analytics records, for example, obfuscating sensitive data such as the `Authorization` header | + + + +There are two different options for the Post Plugin that is executed at the end of the 
request processing chain. The API-level Post Plugin is applied to all requests, whilst the [endpoint-level](/api-management/plugins/plugin-types#per-endpoint-custom-plugins) custom Golang plugin is only applied to requests made to specific endpoints. If both are configured, the endpoint-level plugin will be executed first. + + + +## Plugin Types + +Tyk supports four types of plugins: + +1. **[Request Plugin](#request-plugins)** +2. **[Authentication Plugin](#authentication-plugins)** +3. **[Response Plugin](#response-plugins)** +4. **[Analytics Plugin](#analytics-plugins)** + +## Request Plugins + +There are 4 different phases in the [request lifecycle](/api-management/traffic-transformation#request-middleware-chain) you can inject custom plugins, including [Authentication plugins](/api-management/plugins/plugin-types#authentication-plugins). There are performance advantages to picking the correct phase, and of course that depends on your use case and what functionality you need. + +### Hook Capabilities +| Functionality | Pre | Auth | Post-Auth | Post | +| :------------------------- | :---------- | :------------- | :----------- | :----------- | +| Can modify the Header | βœ… | βœ… | βœ… | βœ… +| Can modify the Body | βœ… | βœ… | βœ… |βœ… +| Can modify Query Params | βœ… | βœ… | βœ… |βœ… +| Can view Session1 Details (metadata, quota, context-vars, tags, etc) | ❌ | βœ… |βœ… |βœ… +| Can modify Session1 2 | ❌ | βœ… | ❌ |❌ +| Can Add More Than One3 | βœ… | ❌ |βœ… | βœ… + +1. A [Session object](/api-management/policies#what-is-a-session-object) contains allowances and identity information that is unique to each requestor + +2. You can modify the session by using your programming language's SDK for Redis. Here is an [example](https://github.com/TykTechnologies/custom-plugins/blob/master/plugins/go-auth-multiple_hook_example/main.go#L135) of doing that in Golang. + +3. For select hook locations, you can add more than one plugin. 
For example, in the same API request, you can have 3 Pre, 1 auth, 5 post-auth, and 2 post plugins. + +### Return Overrides / ReturnOverrides +You can have your plugin finish the request lifecycle and return a response with custom payload & headers to the requestor. + +[Read more here](/api-management/plugins/rich-plugins#returnoverrides) + +##### Python Example + +```python +from tyk.decorators import * + +@Hook +def MyCustomMiddleware(request, session, spec): + print("my_middleware: MyCustomMiddleware") + request.object.return_overrides.headers['content-type'] = 'application/json' + request.object.return_overrides.response_code = 200 + request.object.return_overrides.response_error = "{\"key\": \"value\"}\n" + return request, session +``` + +##### JavaScript Example +```javascript +var testJSVMData = new TykJS.TykMiddleware.NewMiddleware({}); + +testJSVMData.NewProcessRequest(function(request, session, config) { + request.ReturnOverrides.ResponseError = "Foobarbaz" + request.ReturnOverrides.ResponseBody = "Foobar" + request.ReturnOverrides.ResponseCode = 200 + request.ReturnOverrides.ResponseHeaders = { + "X-Foo": "Bar", + "X-Baz": "Qux" + } + return testJSVMData.ReturnData(request, {}); +}); +``` + + +## Authentication Plugins + +If you have unique authentication requirements, you can write a custom authentication plugin. + +### Session Authentication and Authorization + +A very important thing to understand when using custom authentication plugins is that Tyk will continue to perform session authentication and authorization using the information returned by your plugin. Tyk will cache this Session information. **This is necessary in order to do things like rate limiting, access control, quotas, throttling, etc.** + +Tyk will try to be clever about what to cache, but we need to help it. There are two ways to do that, with and without the `ID Extractor`. 
+ +#### The ID Extractor + +The ID Extractor is a caching mechanism that's used in combination with Tyk Plugins. It can be used specifically with plugins that implement custom authentication mechanisms. The ID Extractor works for all rich plugins: gRPC-based plugins, Python and Lua. + +See [ID Extractor](/api-management/plugins/plugin-types#plugin-caching-mechanism) for more details. + +#### Token Metadata + +Tyk creates an in-memory object to track the rate limit, quotas, and more for each session. + +This is why we set the `token` metadata when using custom authentication middleware, in order to give Tyk a unique ID with which to track each session. + +For backwards compatibility, even when using an ID Extractor, we need to continue to set the `token` metadata. For example, when building a session object in GoLang custom middleware: + +```{.copyWrapper} +object.Session = &coprocess.SessionState{ + LastUpdated: time.Now().String(), + Rate: 5, + Per: 10, + QuotaMax: int64(0), + QuotaRenews: time.Now().Unix(), + IdExtractorDeadline: extractorDeadline, + Metadata: map[string]string{ + "token": "my-unique-token", + }, + ApplyPolicies: ["5d8929d8f56e1a138f628269"], + } +``` +[source](https://github.com/TykTechnologies/tyk-grpc-go-basicauth-jwt/blob/master/main.go#L102) + +#### Without ID Extractor + +When not using ID Extractor, Tyk will continue to cache authenticated sessions returned by custom auth plugins. We must set a unique `token` field in the Metadata (see above) that Tyk will use to cache. + +### Supported Languages + +The following languages are supported for custom authentication plugins: + +- All Rich Plugins (gRPC, Python, Lua) +- GoLang + +See the [supported languages](/api-management/plugins/overview#supported-languages) section for custom authentication plugin examples in a language of your choosing. 
There's also a [blog that walks you through setting up gRPC custom auth in Java](https://tyk.io/blog/how-to-setup-custom-authentication-middleware-using-grpc-and-java/). + +### Tyk Operator + +Please consult the Tyk Operator supporting documentation for examples of how to configure a Tyk Operator API to use: + +- [Go custom authentication plugin](/tyk-stack/tyk-operator/create-an-api#custom-plugin-auth-go) +- [gRPC custom authentication plugin](/tyk-stack/tyk-operator/create-an-api#custom-plugin-auth-grpc) + +## Response Plugins + +Since Tyk 3.0 we have incorporated response hooks. This type of hook allows you to modify the response object returned by the upstream. The flow is as follows: + +- Tyk receives the request. +- Tyk runs the full middleware chain, including any other plugin hooks like Pre, Post, Custom Authentication, etc. +- Tyk sends the request to your upstream API. +- The response is received by Tyk and the response hook is triggered. +- Your plugin modifies the response and sends it back to Tyk. +- Tyk returns the modified response to the client. + +This snippet illustrates the hook function signature: + +```python +@Hook +def ResponseHook(request, response, session, metadata, spec): + tyk.log("ResponseHook is called", "info") + # In this hook we have access to the response object, to inspect it, uncomment the following line: + # print(response) + tyk.log("ResponseHook: upstream returned {0}".format(response.status_code), "info") + # Attach a new response header: + response.headers["injectedkey"] = "injectedvalue" + return response +``` + +If working with a Tyk Classic API, you would add this configuration to the API definition: + +``` +{ + "custom_middleware": { + "response": [ + { + "name": "ResponseHook", + "path": "middleware/middleware.py" + } + ], + "driver": "python" + } +} +``` + + - `driver`: set this to the appropriate value for the plugin type (e.g. `python`, `goplugin`) + - `response`: this is the hook name.
You use middleware with the `response` hook type because you want this custom middleware to process the request on its return leg of a round trip. + - `response.name`: is your function name from the plugin file. + - `response.path`: is the full or relative (to the Tyk binary) path to the plugin source file. Ensure Tyk has read access to this file. + +Starting from versions 5.0.4 and 5.1.1+ for our Go, Python and Ruby users we have introduced the `multivalue_headers` field to facilitate more flexible and efficient management of headers, particularly for scenarios involving a single header key associated with multiple values. The `multivalue_headers` field, similar to its predecessor, the `headers` field, is a key-value store. However, it can accommodate an array or list of string values for each key, instead of a single string value. This feature empowers you to represent multiple values for a single header key. Here's an example of how you might use `multivalue_headers`, using the Set-Cookie header which often has multiple values: + +``` +multivalue_headers = { + "Set-Cookie": ["sessionToken=abc123; HttpOnly; Secure", "language=en-US; Secure"], +} +``` + +In this example, Set-Cookie header has two associated values: `"sessionToken=abc123; HttpOnly; Secure"` and `"language=en-US; Secure"`. 
To help you understand this further, let's see how `multivalue_headers` can be used in a Tyk response plugin written in Python: + +```python +from tyk.decorators import * +from gateway import TykGateway as tyk + +@Hook +def Del_ResponseHeader_Middleware(request, response, session, metadata, spec): + # inject a new header with 2 values + new_header = response.multivalue_headers.add() + new_header.key = "Set-Cookie" + new_header.values.extend("sessionToken=abc123; HttpOnly; Secure") + new_header.values.extend("language=en-US; Secure") + + tyk.log(f"Headers content :\n {response.headers}\n----------", "info") + tyk.log(f"Multivalue Headers updated :\n {response.multivalue_headers}\n----------", "info") + + return response +``` + +In this script, we add 2 values for the `Set-Cookie` header and then log both: the traditional `headers` and the new `multivalue_headers`. This is a great way to monitor your transition to `multivalue_headers` and ensure that everything is functioning as expected. + +Please note, while the `headers` field will continue to be available and maintained for backward compatibility, we highly encourage the adoption of `multivalue_headers` for the added flexibility in handling multiple header values. + +### Go response plugins + +[Go response plugins](/api-management/plugins/golang#creating-a-custom-response-plugin) have been available since Tyk v3.2. + +### Supported Response Plugin Languages + +See [Supported Plugins](/api-management/plugins/overview#supported-languages) for details on which languages the response plugin is supported in. + +## Analytics Plugins + +Since Tyk 4.1.0 we have incorporated analytic plugins which enables editing or removal of all parts of analytics records and raw request and responses recorded by Tyk at the gateway level. This feature leverages existing Go plugin infrastructure. + +- Tyk receives the request. 
+- Tyk runs the full middleware chain, including any other plugins hooks like Pre, Post, Custom Authentication, etc. +- Tyk sends the request to your upstream API. +- The response is received and analytics plugin function is triggered before recording the hit to redis. +- Your plugin modifies the analytics record and sends it back to Tyk. +- Tyk takes the modified analytics record and record the hit in redis. + +Example analytics Go plugins can be found [here](https://github.com/TykTechnologies/tyk/blob/master/test/goplugins/test_goplugin.go#L149) + +An analytics plugin is configured using the `analytics_plugin` configuration block within an API Definition. This contains the following configuration parameters: + +- `enable`: Set to `true` to enable the plugin +- `func_name`: The name of the function representing the plugin +- `plugin_path`: The path to the source code file containing the function that implements the plugin + + + + + +To enable the analytics rewriting functionality, adjust the following in API definition: + +```json +{ + "analytics_plugin": { + "enable": true, + "func_name": "", + "plugin_path": "/analytics_plugin.so" + } +} +``` + + + + + +The example API Definition resource listed below listens on path */httpbin* and forwards requests upstream to *http://httpbin.org*. A Go Analytics Plugin is enabled for function *MaskAnalyticsData*, located within the */opt/tyk-gateway/plugins/example-plugin.so* shared object file. 
+ +```yaml {linenos=table,hl_lines=["15-18"],linenostart=1} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: analytics-plugin +spec: + name: httpbin-analytics-plugin + active: true + protocol: http + proxy: + listen_path: /httpbin + strip_listen_path: true + target_url: http://httpbin.org + use_keyless: true + enable_detailed_recording: true + analytics_plugin: + enable: true + func_name: MaskAnalyticsData # Replace it with function name of your plugin + plugin_path: /opt/tyk-gateway/plugins/example-plugin.so # Replace it with path of your plugin file +``` + + + + + +
+ +## Advanced Configuration + +There are two advanced configurations for plugin types: + +1. **[Per Endpoint Custom Plugin](#per-endpoint-custom-plugins)** +2. **[Plugin Caching Mechanism for Authentication Plugin](#plugin-caching-mechanism)** + +## Per-Endpoint Custom Plugins + +Tyk's custom plugin architecture allows you to deploy custom logic that will be invoked at certain points in the [middleware chain](/api-management/traffic-transformation#request-middleware-chain) as Tyk processes requests to your APIs. + +At the API-level, there are several points in the processing flow where custom plugins can be "hooked", as explained [here](/api-management/plugins/plugin-types#plugin-types). Each of these will be invoked for calls to any endpoint on an API. If you want to perform custom logic only for specific endpoints, you must include selective processing logic within the plugin. + +At the endpoint-level, Tyk provides the facility to attach a custom Golang plugin at the end of the request processing chain (immediately before the API-level post-plugin is executed). + +### When to use the per-endpoint custom plugin + +##### Aggregating data from multiple services + +From a custom plugin, you can make calls out to other internal and upstream APIs. You can then aggregate and process the responses, returning a single response object to the originating client. This allows you to configure a single externally facing API to simplify interaction with multiple internal services, leaving the heavy lifting to Tyk rather than standing up an aggregation service within your stack. + +##### Enforcing custom policies + +Tyk provides a very flexible middleware chain where you can combine functions to implement the access controls you require to protect your upstream services. Of course, not all scenarios can be covered by Tyk’s standard middleware functions, but you can use a custom plugin to apply whatever custom logic you require to optimize your API experience.
+ +##### Dynamic Routing + +With a custom plugin you can implement complex dynamic routing of requests made to a single external endpoint on to different upstream services. The flexibility of the virtual endpoint gives access to data within the request (including the key session) and also the ability to make calls to other APIs to make decisions on the routing of the request. It can operate as a super-powered URL rewrite middleware. + +### How the per-endpoint custom plugin works + +Tyk Gateway is written using Golang. This has a flexible plugin architecture which allows for custom code to be compiled separately from the gateway and then invoked natively by the gateway. When registering a custom Go plugin in the API definition, you must provide the location of the compiled plugin and also the name of the function to be invoked within that package. + +Go plugins must therefore be [compiled](/api-management/plugins/golang#plugin-compiler) and [loaded](/api-management/plugins/golang#loading-custom-go-plugins-into-tyk) into the Gateway in order that the function named in the plugin configuration in the API definition can be located and executed at the appropriate stage in the request middleware processing chain. + +The custom code within the plugin has access to contextual data such as the session object and API definition. If required, it can [terminate the request](/api-management/plugins/golang#terminating-the-request) and hence can provide a [Virtual Endpoint](/api-management/traffic-transformation/virtual-endpoints) style capability using the Go language, rather than JavaScript (as supported by the virtual endpoint middleware). This can then act as a high-performance replacement for the JavaScript virtual endpoints or for cases when you want to make use of external libraries. 
+ +{/* proposed "summary box" to be shown graphically on each middleware page + ## Ignore Authentication middleware summary + - The Per-Endpoint Custom Plugin is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Per-Endpoint Custom Plugin can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + +### Using the Per-Endpoint Plugin with Tyk OAS APIs + +The [per-endpoint custom plugin](/api-management/plugins/plugin-types#per-endpoint-custom-plugins) provides the facility to attach a custom Go plugin at the end of the request processing chain. +This plugin allows you to add custom logic to the processing flow for the specific endpoint without adding to the processing complexity of other endpoints. +It can [terminate the request](/api-management/plugins/golang#terminating-the-request) if required, +and provides a [Virtual Endpoint](/api-management/traffic-transformation/virtual-endpoints) style capability using the Go language, rather than JavaScript (as supported by the virtual endpoint middleware). + +The middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/plugins/plugin-types#using-the-per-endpoint-plugin-with-tyk-classic-apis) page. + +#### Using Tyk OAS API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. 
Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The endpoint plugin middleware (`postPlugins`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +The `postPlugins` object has the following configuration: + +- `enabled`: enable the middleware for the endpoint +- `functionName`: this is the name of the Go function that will be executed when the middleware is triggered +- `path`: the relative path to the source file containing the compiled Go code + +You can chain multiple plugin functions in an array. Tyk will process them in the order they appear in the API definition. + +For example: + +```json {hl_lines=["39-45"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-endpoint-plugin", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-endpoint-plugin", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-endpoint-plugin/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "postPlugins": [ + { + "enabled": true, + "functionName": "myUniqueFunctionName", + "path": "/middleware/myPlugin.so" + } + ] + } + } + } + } +} +``` + +In this example the per-endpoint custom plugin middleware has been configured for HTTP `GET` requests to the `/anything` endpoint. For any call made to this endpoint, Tyk will invoke the function `myUniqueFunctionName` in the file located at `/middleware/myPlugin.so`. 
+ +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the per-endpoint custom plugin middleware. + +#### Using API Designer + +Adding a per-endpoint custom plugin to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Go Post-Plugin middleware** + + Select **ADD MIDDLEWARE** and choose **Go Post-Plugin** from the *Add Middleware* screen. + + Adding the Go Post-Plugin middleware + +3. **Configure the middleware** + + You must provide the path to the compiled plugin and the name of the Go function that should be invoked by Tyk Gateway when the middleware is triggered. + + Configuring the per-endpoint custom plugin + +4. **Save the API** + + Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes. + + + + + You are only able to add one custom plugin to each endpoint when using the API Designer, however you can add more by editing the API definition directly in the Raw Definition editor. + + + +### Using the Per-Endpoint Plugin with Tyk Classic APIs + +The [per-endpoint custom plugin](/api-management/plugins/plugin-types#per-endpoint-custom-plugins) provides the facility to attach a custom Golang plugin at the end of the request processing chain. +This plugin allows you to add custom logic to the processing flow for the specific endpoint without adding to the processing complexity of other endpoints. 
+It can [terminate the request](/api-management/plugins/golang#terminating-the-request), if required, +and hence can provide a [Virtual Endpoint](/api-management/traffic-transformation/virtual-endpoints) style capability using the Go language, rather than JavaScript (as supported by the virtual endpoint middleware). + +This middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](/api-management/plugins/plugin-types#using-the-per-endpoint-plugin-with-tyk-oas-apis) page. + +#### Using Tyk Classic API Definition + +To enable the middleware you must add a new `go_plugin` object to the `extended_paths` section of your API definition. + +The `go_plugin` object has the following configuration: + +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `func_name`: this is the "symbol" or function name you are calling in your Go plugin once loaded - a function can be called by one or more APIs +- `plugin_path`: the relative path of the shared object containing the function you wish to call, one or many `.so` files can be called + +You can register multiple plugin functions for a single endpoint. Tyk will process them in the order they appear in the API definition. + +For example: +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "go_plugin": [ + { + "disabled": false, + "path": "/anything", + "method": "GET", + "plugin_path": "/middleware/myPlugin.so", + "func_name": "myUniqueFunctionName" + } + ] + } +} +``` + +In this example the per-endpoint custom plugin middleware has been configured for HTTP `GET` requests to the `/anything` endpoint. For any call made to this endpoint, Tyk will invoke the function `myUniqueFunctionName` in the file located at `/middleware/myPlugin.so`. 
+
+#### Using API Designer
+
+You can use the API Designer in the Tyk Dashboard to add the per-endpoint custom plugin middleware for your Tyk Classic API by following these steps.
+
+1. **Add an endpoint for the path and select the plugin**
+
+    From the **Endpoint Designer** add an endpoint that matches the path for which you want to trigger the custom plugin function. Select the **Go Plugin** plugin.
+
+    Selecting the middleware
+
+2. **Locate the middleware in the raw API definition**
+
+    Once you have selected the middleware for the endpoint, you need to switch to the *Raw Definition* view and then locate the `go_plugin` section (you can search within the text editor window).
+
+    Locating the middleware configuration
+
+3. **Configure the middleware**
+
+    Now you can directly edit the `plugin_path` and `func_name` to locate your compiled plugin function.
+
+    Configuring the middleware
+
+4. **Save the API**
+
+    Use the *save* or *create* buttons to save the changes and activate the middleware.
+
+## Plugin Caching Mechanism
+
+The **ID extractor** is a caching mechanism that's used in combination with Tyk Plugins. It is used specifically with plugins that implement **custom authentication mechanisms**.
+
+We use the term `ID` to describe any key that's used for authentication purposes.
+
+When a custom authentication mechanism is used, every API call triggers a call to the associated middleware function, if you're using a gRPC-based plugin this translates into a gRPC call. If you're using a native plugin (like a Python plugin), this involves a Python interpreter call.
+
+The ID extractor works with the following rich plugins: gRPC-based plugins, Python and Lua.
+
+### When to use the ID Extractor?
+
+The main idea of the ID extractor is to reduce the number of calls made to your plugin and cache the API keys that have been already authorized by your authentication mechanism.
This means that after a successful authentication event, subsequent calls will be handled by the Tyk Gateway and its Redis cache, resulting in a performance similar to the built-in authentication mechanisms that Tyk provides.
+
+### When does the ID Extractor Run?
+
+When enabled, the ID extractor runs right before the authentication step, allowing it to take control of the flow and decide whether to call your authentication mechanism or not.
+
+If my ID is cached by this mechanism and my plugin is no longer called, how do I expire it?
+When you implement your own authentication mechanism using plugins, you initialise the session object from your own code. The session object has a field that's used to configure the lifetime of a cached ID, this field is called `id_extractor_deadline`. See [Plugin Data Structures](/api-management/plugins/rich-plugins#rich-plugins-data-structures) for more details.
+The value of this field should be a UNIX timestamp on which the cached ID will expire, like `1507268142958`. It's an integer.
+
+For example, this snippet is used in a NodeJS plugin, inside a custom authentication function:
+
+```
+// Initialize a session state object
+  var session = new tyk.SessionState()
+  // Get the current UNIX timestamp
+  var timestamp = Math.floor( new Date() / 1000 )
+  // Based on the current timestamp, add 60 seconds:
+  session.id_extractor_deadline = timestamp + 60
+  // Finally inject our session object into the request object:
+  Obj.session = session
+```
+
+If you already have a plugin that implements a custom authentication mechanism, appending the `id_extractor_deadline` and setting its value is enough to activate this feature.
+In the above sample, Tyk will cache the key for 60 seconds. During that time any requests that use the cached ID won't call your plugin.
+
+### How to enable the ID Extractor
+
+The ID extractor is configured on a per API basis.
+The API should be a protected one and have the `enable_coprocess_auth` flag set to true, like the following definition: + +```json +{ + "name": "Test API", + "api_id": "my-api", + "org_id": "my-org", + "use_keyless": false, + "auth": { + "auth_header_name": "Authorization" + }, + "proxy": { + "listen_path": "/test-api/", + "target_url": "http://httpbin.org/", + "strip_listen_path": true + }, + "enable_coprocess_auth": true, + "custom_middleware_bundle": "bundle.zip" +} +``` + +If you're not using the Community Edition, check the API settings in the dashboard and make sure that "Custom Auth" is selected. + +The second requirement is to append an additional configuration block to your plugin manifest file, using the `id_extractor` key: + +```json +{ + "custom_middleware": { + "auth_check": { "name": "MyAuthCheck" }, + "id_extractor": { + "extract_from": "header", + "extract_with": "value", + "extractor_config": { + "header_name": "Authorization" + } + }, + "driver": "grpc" + } +} +``` + +* `extract_from` specifies the source of the ID to extract. +* `extract_with` specifies how to extract and parse the extracted ID. +* `extractor_config` specifies additional parameters like the header name or the regular expression to use, this is different for every choice, see below for more details. + + +### Available ID Extractor Sources + +#### Header Source + +Use this source to extract the key from a HTTP header. 
Only the name of the header is required: + +```json +{ + "id_extractor": { + "extract_from": "header", + "extract_with": "value", + "extractor_config": { + "header_name": "Authorization" + } + } +} +``` + +#### Form source + +Use this source to extract the key from a submitted form, where `param_name` represents the key of the submitted parameter: + + +```json +{ + "id_extractor": { + "extract_from": "form", + "extract_with": "value", + "extractor_config": { + "param_name": "my_param" + } + } +} +``` + + +### Available ID Extractor Modes + +#### Value Extractor + +Use this to take the value as its present. This is commonly used in combination with the header source: + +```json +{ + "id_extractor": { + "extract_from": "header", + "extract_with": "value", + "extractor_config": { + "header_name": "Authorization" + } + } +} +``` + +#### Regular Expression Extractor + +Use this to match the ID with a regular expression. This requires additional parameters like `regex_expression`, which represents the regular expression itself and `regex_match_index` which is the item index: + +```json +{ + "id_extractor": { + "extract_from": "header", + "extract_with": "regex", + "extractor_config": { + "header_name": "Authorization", + "regex_expression": "[^-]+$", + "regex_match_index": 0 + } + } +} +``` + +Using the example above, if we send a header like `prefix-d28e17f7`, given the regular expression we're using, the extracted ID value will be `d28e17f7`. 
+
+### Example Session
+Here's an example of a Session being built in Go custom middleware:
+```{.copyWrapper}
+extractorDeadline := time.Now().Add(time.Second * 5).Unix()
+object.Session = &coprocess.SessionState{
+
+    LastUpdated: time.Now().String(),
+    Rate: 5,
+    Per: 10,
+    QuotaMax: int64(0),
+    QuotaRenews: time.Now().Unix(),
+    Metadata: map[string]string{
+      "token": "my-unique-token",
+    },
+    ApplyPolicies: []string{"5d8929d8f56e1a138f628269"},
+  }
+```
+[source](https://github.com/TykTechnologies/tyk-grpc-go-basicauth-jwt/blob/master/main.go#L102)
+
+Note: When using an ID Extractor, you must set a `LastUpdated` or else token updates will not be applied. If you don't set an ID Extractor, Tyk will store session information in the cache based off the `token` field that is set in the metadata.
+
diff --git a/api-management/plugins/rich-plugins.mdx b/api-management/plugins/rich-plugins.mdx
new file mode 100644
index 000000000..c01931fdb
--- /dev/null
+++ b/api-management/plugins/rich-plugins.mdx
@@ -0,0 +1,3533 @@
+---
+title: "Rich Plugins"
+description: "Learn how to write rich plugins for Tyk using Python, gRPC and Lua"
+sidebarTitle: "Rich Plugins"
+---
+
+import GrpcInclude from '/snippets/grpc-include.mdx';
+import { ButtonLeft } from '/snippets/ButtonLeft.mdx';
+
+## Introduction
+
+Rich plugins make it possible to write powerful middleware for Tyk. Tyk supports:
+
+* [Python](/api-management/plugins/rich-plugins#overview)
+* [gRPC](/api-management/plugins/rich-plugins#overview-1)
+* [Lua](/api-management/plugins/rich-plugins#using-lua)
+
+gRPC provides the ability to write plugins using many languages including C++, Java, Ruby and C#.
+
+The dynamically built Tyk binaries can expose and call Foreign Function Interfaces in guest languages that extend the functionality of a gateway process.
+
+The plugins are able to directly call some Tyk API functions from within their guest language.
They can also be configured so that they hook into various points along the standard middleware chain. + + + +When using Python plugins, the middleware function names are set globally. So, if you include two or more plugins that implement the same function, the last declared plugin implementation of the function will be returned. We plan to add namespaces in the future. + + + +## How do rich plugins work ? + +### ID Extractor & Auth Plugins + +The ID Extractor is a caching mechanism that's used in combination with Tyk Plugins. It can be used specifically with plugins that implement custom authentication mechanisms. The ID Extractor works for all rich plugins: gRPC-based plugins, Python and Lua. + +See [ID Extractor](/api-management/plugins/plugin-types#plugin-caching-mechanism) for more details. + +### Interoperability + +This feature implements an in-process message passing mechanism, based on [Protocol Buffers](https://developers.google.com/protocol-buffers/), any supported languages should provide a function to receive, unmarshal and process this kind of messages. + +The main interoperability task is achieved by using [cgo](https://golang.org/cmd/cgo/) as a bridge between a supported language -like Python- and the Go codebase. + +Your C bridge function must accept and return a `CoProcessMessage` data structure like the one described in [`api.h`](https://github.com/TykTechnologies/tyk/blob/master/coprocess/api.h), where `p_data` is a pointer to the serialised data and `length` indicates the length of it. + +```{.copyWrapper} +struct CoProcessMessage { + void* p_data; + int length; +}; +``` + +The unpacked data will hold the actual `CoProcessObject` data structure. + +- `HookType` - the hook type (see below) +- `Request` - the HTTP request +- `Session` - the [Tyk session object](/api-management/policies#session-object). +- `Metadata` - the metadata from the session data above (key/value string map). +- `Spec` - the API specification data. 
Currently organization ID, API ID and config_data. + +```{.copyWrapper} +type CoProcessObject struct { + HookType string + Request CoProcessMiniRequestObject + Session SessionState + Metadata map[string]string + Spec map[string]string +} +``` + +### Coprocess Dispatcher + +`Coprocess.Dispatcher` describes a very simple interface for implementing the dispatcher logic, the required methods are: `Dispatch`, `DispatchEvent` and `Reload`. + +`Dispatch` accepts a pointer to a struct `CoProcessObject` (as described above) and must return an object of the same type. This method is called for every configured hook on every request. +Typically, it performs a single function call in the target language (such as `Python_DispatchHook` in `coprocess_python`), where the corresponding logic is handledβ€”mainly because different languages have different ways of loading, referencing, or calling middleware. + +`DispatchEvent` provides a way of dispatching Tyk events to a target language. This method doesn't return any variables but does receive a JSON-encoded object containing the event data. For extensibility purposes, this method doesn't use Protocol Buffers, the input is a `[]byte`, the target language will take this (as a `char`) and perform the JSON decoding operation. + +`Reload` is called when triggering a hot reload, this method could be useful for reloading scripts or modules in the target language. + +### Coprocess Dispatcher - Hooks + +This component is in charge of dispatching your HTTP requests to the custom middleware. The list, from top to bottom, shows the order of execution. The dispatcher follows the standard middleware chain logic and provides a simple mechanism for "hooking" your custom middleware behavior, the supported hooks are: + +* **Pre**: gets executed before the request is sent to your upstream target and before any authentication information is extracted from the header or parameter list of the request. 
When enabled, this applies to both keyless and protected APIs. +* **AuthCheck**: gets executed as a custom authentication middleware, instead of the standard ones provided by Tyk. Use this to provide your own authentication mechanism. +* **PostKeyAuth**: gets executed right after the authentication process. +* **Post**: gets executed after the authentication, validation, throttling, and quota-limiting middleware has been executed, just before the request is proxied upstream. Use this to post-process a request before sending it to your upstream API. This is only called when using protected APIs. If you want to call a hook after the authentication but before the validation, throttling and other middleware, see **PostKeyAuth**. +* **Response**: gets executed after the upstream API replies. The arguments passed to this hook include both the request and response data. Use this to modify the HTTP response before it's sent to the client. This hook also receives the request object, the session object, the metadata and API definition associated with the request. + + + + + Response hooks are not available for native Go plugins. Python and gRPC plugins are supported. + + + + +### Coprocess Gateway API + +[`coprocess_api.go`](https://github.com/TykTechnologies/tyk/tree/master/coprocess) provides a bridge between the Gateway API and C. 
Any function that needs to be exported should have the `export` keyword: + +```{.copyWrapper} +//export TykTriggerEvent +func TykTriggerEvent( CEventName *C.char, CPayload *C.char ) { + eventName := C.GoString(CEventName) + payload := C.GoString(CPayload) + + FireSystemEvent(tykcommon.TykEvent(eventName), EventMetaDefault{ + Message: payload, + }) +} +``` + +You should also expect a header file declaration of this function in [`api.h`](https://github.com/TykTechnologies/tyk/blob/master/coprocess/api.h), like this: + +```{.copyWrapper} +#ifndef TYK_COPROCESS_API +#define TYK_COPROCESS_API +extern void TykTriggerEvent(char* event_name, char* payload); +#endif +``` + +The language binding will include this header file (or declare the function inline) and perform the necessary steps to call it with the appropriate arguments (like an FFI mechanism could do). As a reference, this is how this could be achieved if you're building a [Cython](http://cython.org/) module: + +```{.copyWrapper} +cdef extern: + void TykTriggerEvent(char* event_name, char* payload); + +def call(): + event_name = 'my event'.encode('utf-8') + payload = 'my payload'.encode('utf-8') + TykTriggerEvent( event_name, payload ) +``` + +### Basic usage + +The intended way of using a Coprocess middleware is to specify it as part of an API Definition: + +```{.json} +"custom_middleware": { + "pre": [ + { + "name": "MyPreMiddleware", + "require_session": false + }, + { + "name": "AnotherPreMiddleware", + "require_session": false + } + ], + "post": [ + { + "name": "MyPostMiddleware", + "require_session": false + } + ], + "post_key_auth": [ + { + "name": "MyPostKeyAuthMiddleware", + "require_session": true + } + ], + "auth_check": { + "name": "MyAuthCheck" + }, + "driver": "python" +} +``` + + +All hook types support chaining except the custom auth check (`auth_check`). + + + +--- + +## Rich Plugins Data Structures + +This section describes the data structures used by the Tyk rich plugins. 
+
+The coprocess object is a message dispatched by Tyk to the gRPC server handling the custom plugins.
+
+The Tyk [Protocol Buffer definitions](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto) are intended for users to generate their own bindings using the appropriate gRPC tools for the required target language.
+The remainder of this document illustrates a class diagram and explains the attributes of the protobuf messages.
+
+### Coprocess Object
+
+The `Coprocess.Object` data structure wraps a `Coprocess.MiniRequestObject` and `Coprocess.ResponseObject`. It contains additional fields that are useful for users that implement their own request dispatchers, like the middleware hook type and name.
+It also includes the session state object (`SessionState`), which holds information about the current key/user that's used for authentication.
+
+```protobuf
+message Object {
+  HookType hook_type = 1;
+  string hook_name = 2;
+  MiniRequestObject request = 3;
+  SessionState session = 4;
+  map<string, string> metadata = 5;
+  map<string, string> spec = 6;
+  ResponseObject response = 7;
+}
+```
+
+This class diagram presents the structure of the object:
+
+
+
+#### Field Descriptions
+
+`hook_type`
+Contains the middleware hook type: pre, post, custom auth.
+
+`hook_name`
+Contains the hook name.
+
+`request`
+Contains the request object, see `MiniRequestObject` for more details.
+
+`session`
+Contains the session object, see `SessionState` for more details.
+
+`metadata`
+Contains the metadata. This is a dynamic field.
+
+`spec`
+Contains information about API definition, including `APIID`, `OrgID` and `config_data`.
+
+`response`
+Contains information populated from the upstream HTTP response data, for response hooks. See [ResponseObject](#responseobject) for more details. All the field contents can be modified.
+
+### MiniRequestObject
+
+The `Coprocess.MiniRequestObject` is the main request data structure used by rich plugins.
It's used for middleware calls and contains important fields like headers, parameters, body and URL. A `MiniRequestObject` is part of a `Coprocess.Object`.
+
+```protobuf
+message MiniRequestObject {
+  map<string, string> headers = 1;
+  map<string, string> set_headers = 2;
+  repeated string delete_headers = 3;
+  string body = 4;
+  string url = 5;
+  map<string, string> params = 6;
+  map<string, string> add_params = 7;
+  map<string, string> extended_params = 8;
+  repeated string delete_params = 9;
+  ReturnOverrides return_overrides = 10;
+  string method = 11;
+  string request_uri = 12;
+  string scheme = 13;
+  bytes raw_body = 14;
+}
+```
+
+#### Field Descriptions
+
+`headers`
+A read-only field for reading headers injected by previous middleware. Modifying this field won't alter the request headers. See `set_headers` and `delete_headers` for this.
+
+`set_headers`
+This field appends the given headers (keys and values) to the request.
+
+`delete_headers`
+This field contains an array of header names to be removed from the request.
+
+`body`
+Contains the request body. See `ReturnOverrides` for response body modifications.
+
+`raw_body`
+Contains the raw request body (bytes).
+
+`url`
+The request URL.
+
+`params`
+A read-only field that contains the request params. Modifying this value won't affect the request params.
+
+`add_params`
+Add parameters to the request.
+
+`delete_params`
+This field contains an array of parameter keys to be removed from the request.
+
+`return_overrides`
+See `ReturnOverrides` for more information.
+
+`method`
+The request method, e.g. GET, POST, etc.
+
+`request_uri`
+Raw unprocessed URL which includes query string and fragments.
+
+`scheme`
+Contains the URL scheme, e.g. `http`, `https`.
+
+---
+
+### ResponseObject
+
+The `ResponseObject` exists within an [object](#coprocess-object) for response hooks. The fields are populated with the upstream HTTP response data. All the field contents can be modified.
+
+```protobuf
+syntax = "proto3";
+
+package coprocess;
+
+message ResponseObject {
+  int32 status_code = 1;
+  bytes raw_body = 2;
+  string body = 3;
+  map<string, string> headers = 4;
+  repeated Header multivalue_headers = 5;
+}
+
+message Header {
+  string key = 1;
+  repeated string values = 2;
+}
+```
+
+#### Field Descriptions
+
+`status_code`
+This field indicates the HTTP status code that was sent by the upstream.
+
+`raw_body`
+This field contains the HTTP response body (bytes). It's always populated.
+
+`body`
+This field contains the HTTP response body in string format. It's not populated if the `raw_body` contains invalid UTF-8 characters.
+
+`headers`
+A map that contains the headers sent by the upstream.
+
+`multivalue_headers`
+A list of headers, each header in this list is a structure that consists of two parts: a key and its corresponding values.
+The key is a string that denotes the name of the header, the values are a list of strings that hold the content of the header, this is useful when the header has multiple associated values.
+This field is available for Go, Python and Ruby since tyk v5.0.4 and 5.1.1+.
+
+---
+
+### ReturnOverrides
+
+The `ReturnOverrides` object, when returned as part of a `Coprocess.Object`, overrides the response of a given HTTP request. It also stops the request flow and the HTTP request isn't passed upstream. The fields specified in the `ReturnOverrides` object are used as the HTTP response.
+A sample usage for `ReturnOverrides` is when a rich plugin needs to return a custom error to the user.
+
+```protobuf
+syntax = "proto3";
+
+package coprocess;
+
+message ReturnOverrides {
+  int32 response_code = 1;
+  string response_error = 2;
+  map<string, string> headers = 3;
+  bool override_error = 4;
+  string response_body = 5;
+}
+```
+
+#### Field Descriptions
+
+`response_code`
+This field overrides the HTTP response code and can be used for error codes (403, 500, etc.) or for overriding the response.
+
+`response_error`
+This field overrides the HTTP response body.
+
+`headers`
+This field overrides response HTTP headers.
+
+`override_error`
+This setting provides enhanced customization for returning custom errors. It should be utilized alongside `response_body` for optimal effect.
+
+`response_body`
+This field serves as an alias for `response_error` and holds the HTTP response body.
+
+---
+
+### SessionState
+
+
+A `SessionState` data structure is created for every authenticated request and stored in Redis. It's used to track the activity of a given key in different ways, mainly by the built-in Tyk middleware like the quota middleware or the rate limiter.
+A rich plugin can create a `SessionState` object and store it in the same way built-in authentication mechanisms do. This is what a custom authentication middleware does. This is also part of a `Coprocess.Object`.
+Returning a null session object from a custom authentication middleware is considered a failed authentication and the appropriate HTTP 403 error is returned by the gateway (this is the default behavior) and can be overridden by using `ReturnOverrides`.
+
+#### Field Descriptions
+
+`last_check`
+No longer used.
+
+`allowance`
+No longer in use, should be the same as `rate`.
+
+`rate`
+The number of requests that are allowed in the specified rate limiting window.
+
+`per`
+The number of seconds that the rate window should encompass.
+
+`expires`
+An epoch that defines when the key should expire.
+
+`quota_max`
+The maximum number of requests allowed during the quota period.
+
+`quota_renews`
+An epoch that defines when the quota renews.
+
+`quota_remaining`
+Indicates the remaining number of requests within the user's quota, which is independent of the rate limit.
+
+`quota_renewal_rate`
+The time in seconds during which the quota is valid. So for 1000 requests per hour, this value would be 3600 while `quota_max` and `quota_remaining` would be 1000.
+ +`access_rights` +Defined as a `map` instance, that maps the session's API ID to an [AccessDefinition](#access-definition). The AccessDefinition defines the [access rights](/api-management/policies#setting-granular-paths-on-a-per-key-basis) for the API in terms of allowed: versions and URLs(endpoints). Each URL (endpoint) has a list of allowed methods. For further details consult the tutorials for how to create a [security policy](/api-management/gateway-config-managing-classic#secure-an-api) for Tyk Cloud, Tyk Self Managed and Tyk OSS platforms. + +`org_id` +The organization this user belongs to. This can be used in conjunction with the org_id setting in the API Definition object to have tokens "owned" by organizations. + +`oauth_client_id` +This is set by Tyk if the token is generated by an OAuth client during an OAuth authorization flow. + +`basic_auth_data` +This section contains a hashed representation of the basic auth password and the hashing method used. +For further details see [BasicAuthData](#basicauthdata). + +`jwt_data` +Added to sessions where a Tyk key (embedding a shared secret) is used as the public key for signing the JWT. The JWT token's KID header value references the ID of a Tyk key. See [JWTData](#jwtdata) for an example. + +`hmac_enabled` +When set to `true` this indicates generation of a [HMAC signature](/basic-config-and-security/security/authentication-authorization/hmac-signatures) using the secret provided in `hmac_secret`. If the generated signature matches the signature provided in the *Authorization* header then authentication of the request has passed. + +`hmac_secret` +The value of the HMAC shared secret. + +`is_inactive` +Set this value to true to deny access. + +`apply_policy_id` +The policy ID that is bound to this token. + + + +Although `apply_policy_id` is still supported, it is now deprecated. `apply_policies` is now used to list your policy IDs as an array. 
This supports the **[Multiple Policy](/api-management/policies#partitioned-policy-functionality)** feature introduced in the **v2.4 - 1.4** release. + + + +`data_expires` +A value, in seconds, that defines when data generated by this token expires in the analytics DB (must be using Pro edition and MongoDB). + +`monitor` +Defines a [quota monitor](/api-management/gateway-events#monitoring-quota-consumption) containing a list of percentage threshold limits in descending order. These limits determine when webhook notifications are triggered for API users or an organization. Each threshold represents a percentage of the quota that, when reached, triggers a notification. See [Monitor](#monitor) for further details and an example. + +`enable_detailed_recording` +Set this value to true to have Tyk store the inbound request and outbound response data in HTTP Wire format as part of the analytics data. + +`metadata` +Metadata to be included as part of the session. This is a key/value string map that can be used in other middleware such as transforms and header injection to embed user-specific data into a request, or alternatively to query the providence of a key. + +`tags` +Tags are embedded into analytics data when the request completes. If a policy has tags, those tags will supersede the ones carried by the token (they will be overwritten). + +`alias` +As of v2.1, an Alias offers a way to identify a token in a more human-readable manner, add an Alias to a token in order to have the data transferred into Analytics later on so you can track both hashed and un-hashed tokens to a meaningful identifier that doesn't expose the security of the underlying token. + +`last_updated` +A UNIX timestamp that represents the time the session was last updated. Applicable to *Post*, *PostAuth* and *Response* plugins. When developing *CustomAuth* plugins developers should add this to the SessionState instance. 
+
+`id_extractor_deadline`
+This is a UNIX timestamp that signifies when a cached key or ID will expire. This relates to custom authentication, where authenticated keys can be cached to save repeated requests to the gRPC server. See [id_extractor](/api-management/plugins/plugin-types#plugin-caching-mechanism) and [Auth Plugins](/api-management/plugins/plugin-types#authentication-plugins) for additional information.
+
+`session_lifetime`
+UNIX timestamp that denotes when the key will automatically expire. Any subsequent API request made using the key will be rejected. Overrides the global session lifetime. See [Key Expiry and Deletion](/api-management/policies#set-physical-key-expiry-and-deletion) for more information.
+
+`key_id`
+This is the unique identifier for the access token used to authenticate the request, introduced in v5.9.0.
+
+---
+
+### AccessDefinition
+
+
+```protobuf
+message AccessDefinition {
+  string api_name = 1;
+  string api_id = 2;
+  repeated string versions = 3;
+  repeated AccessSpec allowed_urls = 4;
+}
+```
+
+Defined as an attribute within a [SessionState](#session-state) instance. Contains the allowed versions and URLs (endpoints) for the API that the session request relates to. Each URL (endpoint) specifies an associated list of allowed methods. See also [AccessSpec](#access-spec).
+
+#### Field Descriptions
+
+`api_name`
+The name of the API that the session request relates to.
+
+`api_id`
+The ID of the API that the session request relates to.
+
+`versions`
+List of allowed API versions, e.g. `"versions": [ "Default" ]`.
+
+`allowed_urls` List of [AccessSpec](#access-spec) instances. Each instance defines a URL (endpoint) with an associated allowed list of methods. If all URLs (endpoints) are allowed then the attribute is not set.
+
+---
+
+### AccessSpec
+
+
+Defines an API's URL (endpoint) and associated list of allowed methods
+
+```protobuf
+message AccessSpec {
+  string url = 1;
+  repeated string methods = 2;
+}
+```
+
+#### Field Descriptions
+
+`url`
+A URL (endpoint) belonging to the API associated with the request session.
+
+`methods`
+List of allowed methods for the URL (endpoint), e.g. `"methods": [ "GET", "POST", "PUT", "PATCH" ]`.
+
+---
+
+### BasicAuthData
+
+The `BasicAuthData` contains a hashed password and the name of the hashing algorithm used. This is represented by the `basic_auth_data` attribute in [SessionState](#session-state) message.
+
+```yaml
+"basicAuthData": {
+  "password": ,
+  "hash": 
+}
+```
+
+#### Field Descriptions
+
+`password`
+A hashed password.
+
+`hash`
+Name of the [hashing algorithm](/api-management/policies#access-key-hashing) used to hash the password.
+
+---
+
+### JWTData
+
+Added to [sessions](#session-state) where a Tyk key (embedding a shared secret) is used as the public key for signing the JWT. This message contains the shared secret.
+
+```yaml
+"jwtData": {
+  "secret": "the_secret"
+}
+```
+
+#### Field Descriptions
+
+`secret`
+The shared secret.
+
+---
+
+### Monitor
+
+
+Added to a [session](#session-state) when [monitor quota thresholds](/api-management/gateway-events#monitoring-quota-consumption) are defined within the Tyk key. This message contains the quota percentage threshold limits, defined in descending order, that trigger webhook notification.
+
+```protobuf
+message Monitor {
+  repeated double trigger_limits = 1;
+}
+```
+
+#### Field Descriptions
+
+`trigger_limits`
+List of trigger limits defined in descending order. Each limit represents the percentage of the quota that must be reached in order for the webhook notification to be triggered.
+
+```yaml
+"monitor": {
+  "trigger_limits": [80.0, 60.0, 50.0]
+}
+```
+
+---
+
+
+ + + +## Using Python + +### Overview + +#### Requirements + +Since v2.9, Tyk supports any currently stable [Python 3.x version](https://www.python.org/downloads/). The main requirement is to have the Python shared libraries installed. These are available as `libpython3.x` in most Linux distributions. + +- Python3-dev +- [Protobuf](https://pypi.org/project/protobuf/): provides [Protocol Buffers](https://developers.google.com/protocol-buffers/) support +- [gRPC](https://pypi.org/project/grpcio/): provides [gRPC](http://www.grpc.io/) support + +#### Important Note Regarding Performance + +Python plugins are [embedded](https://docs.python.org/3/extending/embedding.html) within the Tyk Gateway process. Tyk Gateway integrates with Python custom plugins via a [cgo](https://golang.org/cmd/cgo) bridge. + +`Tyk Gateway` <-> CGO <-> `Python Custom Plugin` + +In order to integrate with Python custom plugins, the *libpython3.x.so* shared object library is used to embed a Python interpreter directly in the Tyk Gateway. Further details can be found [here](/api-management/plugins/rich-plugins#coprocess-gateway-api) + +This allows combining the strengths of both Python and Go in a single application. However, it's essential to be aware of the potential complexities and performance implications of mixing languages, as well as the need for careful memory management when working with Python objects from Go. + +The Tyk Gateway process initialises the Python interpreter using [Py_initialize](https://docs.python.org/3/c-api/init.html#c.Py_Initialize). The Python [Global Interpreter Lock (GIL)](https://docs.python.org/3/glossary.html/#term-global-interpreter-lock) allows only one thread to execute Python bytecode at a time, ensuring thread safety and simplifying memory management. While the GIL simplifies these aspects, it can limit the scalability of multi-threaded applications, particularly those with CPU-bound tasks, as it restricts parallel execution of Python code. 
+ +In the context of custom Python plugins, API calls are queued and the Python interpreter handles requests sequentially, processing them one at a time. Subsequently, this would consume large amounts of memory, and network sockets would remain open and blocked until the API request is processed. + +#### Install the Python development packages + + + + + + +Starting from Tyk Gateway version `v5.3.0`, Python is no longer bundled with the official Tyk Gateway Docker image by default, to address security vulnerabilities in the Python libraries highlighted by [Docker Scout](https://docs.docker.com/scout/). +
+Whilst Python plugins are still supported by Tyk Gateway, if you want to use them you must extend the image to add support for Python. For further details, please refer to the [release notes](/developer-support/release-notes/gateway) for Tyk Gateway `v5.3.0`. +
+ + +If you wish to use Python plugins using Docker, you can extend the official Tyk Gateway Docker image by adding Python to it. + +This example Dockerfile extends the official Tyk Gateway image to support Python plugins by installing python and the required modules: + +```dockerfile +ARG BASE_IMAGE +FROM ${BASE_IMAGE} AS base + +FROM python:3.11-bookworm +COPY --from=base /opt/tyk-gateway/ /opt/tyk-gateway/ +RUN pip install setuptools && pip install google && pip install 'protobuf==4.24.4' + +EXPOSE 8080 80 443 + +ENV PYTHON_VERSION=3.11 +ENV PORT=8080 + +WORKDIR /opt/tyk-gateway/ + +ENTRYPOINT ["/opt/tyk-gateway/tyk" ] +CMD [ "--conf=/opt/tyk-gateway/tyk.conf" ] +``` + +To use this, you simply run `docker build` with this Dockerfile, providing the Tyk Gateway image that you would like to extend as build argument `BASE_IMAGE`. +As an example, this command will extend Tyk Gateway `v5.3.0` to support Python plugins, generating the image `tyk-gateway-python:v5.3.0`: + +```bash +docker build --build-arg BASE_IMAGE=tykio/tyk-gateway:v5.3.0 -t tyk-gateway-python:v5.3.0 . +``` + +
+ + + +```apt +apt install python3 python3-dev python3-pip build-essential +``` + +#### Install the Required Python Modules + +Make sure that "pip" is available in your system, it should be typically available as "pip", "pip3" or "pipX.X" (where X.X represents the Python version): + +```pip3 +pip3 install protobuf grpcio +``` + + + + + +```yum +yum install python3-devel python3-setuptools +python3 -m ensurepip +``` + +#### Install the Required Python Modules + +Make sure that "pip" is now available in your system, it should be typically available as "pip", "pip3" or "pipX.X" (where X.X represents the Python version): + +```pip3 +pip3 install protobuf grpcio +``` + + + +
+
+#### Python versions
+
+Newer Tyk versions provide more flexibility when using Python plugins, allowing the users to set which Python version to use. By default, Tyk will try to use the latest version available.
+
+To see the Python initialisation log, run the Tyk gateway in debug mode.
+
+To use a specific Python version, set the `python_version` flag under `coprocess_options` in the Tyk Gateway configuration file (tyk.conf).
+
+
+
+Tyk doesn't support Python 2.x.
+
+
+
+#### Troubleshooting
+
+To verify that the required Python Protocol Buffers module is available:
+
+```python3
+python3 -c 'from google import protobuf'
+```
+
+No output is expected from this command on successful setups.
+
+#### How do I write Python Plugins?
+
+We have created [a demo Python plugin repository](https://github.com/TykTechnologies/tyk-plugin-demo-python).
+
+The project implements a simple middleware for header injection, using a Pre hook (see [Tyk custom middleware hooks](/api-management/plugins/rich-plugins#coprocess-dispatcher---hooks)). A single Python script contains the code for it, see [middleware.py](https://github.com/TykTechnologies/tyk-plugin-demo-python/blob/master/middleware.py).
+
+
+### Custom Authentication Plugin Tutorial
+
+#### Introduction
+This tutorial will guide you through the creation of a custom authentication plugin, written in Python.
+A custom authentication plugin allows you to implement your own authentication logic and override the default Tyk authentication mechanism. The sample code implements a very simple key check; currently it supports a single, hard-coded key. It could serve as a starting point for your own authentication logic. We have tested this plugin with Ubuntu 14.
+
+The code used in this tutorial is also available in [this GitHub repository](https://github.com/TykTechnologies/tyk-plugin-demo-python).
+ +#### Requirements + +* Tyk API Gateway: This can be installed using standard package management tools like Yum or APT, or from source code. See [here](/tyk-self-managed/install) for more installation options. + +##### Dependencies + +* The Tyk CLI utility, which is bundled with our RPM and DEB packages, and can be installed separately from [https://github.com/TykTechnologies/tyk-cli](https://github.com/TykTechnologies/tyk-cli) +* In Tyk 2.8 the Tyk CLI is part of the gateway binary, you can find more information by running "tyk help bundle". +* Python 3.4 + +#### Create the Plugin +The first step is to create a new directory for your plugin file: + +```bash +mkdir ~/my-tyk-plugin +cd ~/my-tyk-plugin +``` + +Next you need to create a manifest file. This file contains information about our plugin file structure and how you expect it to interact with the API that will load it. +This file should be named `manifest.json` and needs to contain the following content: + +```json +{ + "file_list": [ + "middleware.py" + ], + "custom_middleware": { + "driver": "python", + "auth_check": { + "name": "MyAuthMiddleware" + } + } +} +``` + +* The `file_list` block contains the list of files to be included in the bundle, the CLI tool expects to find these files in the current working directory. +* The `custom_middleware` block contains the middleware settings like the plugin driver we want to use (`driver`) and the hooks that our plugin will expose. You use the `auth_check` for this tutorial. For other hooks see [here](/api-management/plugins/rich-plugins#coprocess-dispatcher---hooks). +* The `name` field references the name of the function that you implement in your plugin code: `MyAuthMiddleware`. +* You add an additional file called `middleware.py`, this will contain the main implementation of our middleware. + + + + + Your bundle should always contain a file named `middleware.py` as this is the entry point file. 
+ + + +##### Contents of middleware.py + +You import decorators from the Tyk module as this gives you the `Hook` decorator, and you import [Tyk Python API helpers](/api-management/plugins/rich-plugins#tyk-python-api-methods) + +You implement a middleware function and register it as a hook, the input includes the request object, the session object, the API meta data and its specification: + +```python +from tyk.decorators import * +from gateway import TykGateway as tyk + +@Hook +def MyAuthMiddleware(request, session, metadata, spec): + auth_header = request.get_header('Authorization') + if auth_header == '47a0c79c427728b3df4af62b9228c8ae': + tyk.log("I'm logged!", "info") + tyk.log("Request body" + request.object.body, "info") + tyk.log("API config_data" + spec['config_data'], "info") + session.rate = 1000.0 + session.per = 1.0 + metadata["token"] = "47a0c79c427728b3df4af62b9228c8ae" + return request, session, metadata +``` + + +You can modify the `manifest.json` to add as many files as you want. Files that aren't listed in the `manifest.json` file will be ignored when building the plugin bundle. + +#### Building the Plugin + +A plugin bundle is a packaged version of the plugin, it may also contain a cryptographic signature of its contents. The `-y` flag tells the Tyk CLI tool to skip the signing process in order to simplify the flow of this tutorial. For more information on the Tyk CLI tool, see [here](/api-management/plugins/overview#plugin-bundles). + +You will use the Dockerised version of the Tyk CLI tool to bundle our package. + +First, export your Tyk Gateway version to a variable. 
+```bash +##### THIS MUST MATCH YOUR TYK GATEWAY VERSION +$ IMAGETAG=v3.1.2 +``` + +Then run the following commands to generate a `bundle.zip` in your current directory: +```docker +$ docker run \ + --rm -w "/tmp" -v $(pwd):/tmp \ + --entrypoint "/bin/sh" -it \ + tykio/tyk-gateway:$IMAGETAG \ + -c '/opt/tyk-gateway/tyk bundle build -y' +``` + +**Success!** + +You should now have a `bundle.zip` file in the plugin directory. + +#### Publishing the Plugin + +To allow Tyk access to the plugin bundle, you need to serve this file using a web server. For this tutorial we'll use the Python built-in HTTP server (check the official docs for additional information). This server listens on port 8000 by default. To start it use: + +`python3 -m http.server` + +When the server is started our current working directory is used as the web root path, this means that our `bundle.zip` file should be accessible from the following URL: + +`http://:8000/bundle.zip` + +The Tyk Gateway fetches and loads a plugin bundle during startup time and subsequent reloads. For updating plugins using the hot reload feature, you should use different plugin bundle names as you expect them to be used for versioning purposes, e.g. bundle-1, bundle-2, etc. +If a bundle already exists, Tyk will skip the download process and load the version that's already present. + +#### Configure Tyk + +You will need to modify the Tyk global configuration file (`tyk.conf`) to use Python plugins. The following block should be present in this file: + +```json +"coprocess_options": { + "enable_coprocess": true, + "python_path_prefix": "/opt/tyk-gateway" +}, +"enable_bundle_downloader": true, +"bundle_base_url": "http://dummy-bundle-server.com/bundles/", +"public_key_path": "/path/to/my/pubkey" +``` + +##### Options + +* `enable_coprocess`: This enables the plugin +* `python_path_prefix`: Sets the path to built-in Tyk modules, this will be part of the Python module lookup path. 
The value used here is the default one for most installations. +* `enable_bundle_downloader`: This enables the bundle downloader +* `bundle_base_url`: This is a base URL that will be used to download the bundle. You should replace the `bundle_base_url` with the appropriate URL of the web server that's serving your plugin bundles. For now HTTP and HTTPS are supported but we plan to add more options in the future (like pulling directly from S3 buckets). You use the URL that's exposed by the test HTTP server in the previous step. +* `public_key_path`: Modify `public_key_path` in case you want to enforce the cryptographic check of the plugin bundle signatures. If the `public_key_path` isn't set, the verification process will be skipped and unsigned plugin bundles will be loaded normally. + +#### Configure an API Definition + +There are two important parameters that you need to add or modify in the API definition. +The first one is `custom_middleware_bundle` which must match the name of the plugin bundle file. If we keep this with the default name that the Tyk CLI tool uses, it will be `bundle.zip`. + +`"custom_middleware_bundle": "bundle.zip"` + +The second parameter is specific to this tutorial, and should be used in combination with `use_keyless` to allow an API to authenticate against our plugin: + +`"use_keyless": false` +`"enable_coprocess_auth": true` + +`"enable_coprocess_auth"` will instruct the Tyk gateway to authenticate this API using the associated custom authentication function that's implemented by the plugin. + +#### Configuration via the Tyk Dashboard + +To attach the plugin to an API, From the **Advanced Options** tab in the **API Designer** enter **bundle.zip** in the **Plugin Bundle ID** field. + +Plugin Options + +You also need to modify the authentication mechanism that's used by the API. 
+From the **Core Settings** tab in the **API Designer** select **Use Custom Authentication (Python, CoProcess, and JSVM plugins)** from the **Authentication - Authentication Mode** drop-down list. + +Advanced Options + +#### Testing the Plugin + +Now you can simply make an API call against the API for which we've loaded the Python plugin. + + +##### If Running Tyk Gateway from Source + +At this point you have your test HTTP server ready to serve the plugin bundle and the configuration with all the required parameters. +The final step is to start or restart the **Tyk Gateway** (this may vary depending on how you setup Tyk). +A separate service is used to load the Tyk version that supports Python (`tyk-gateway-python`), so we need to stop the standard one first (`tyk-gateway`): + +```service +service tyk-gateway stop +service tyk-gateway-python start +``` + +From now on you should use the following command to restart the service: + +```service +service tyk-gateway-python restart +``` + +A cURL request will be enough for testing our custom authentication middleware. + +This request will trigger a bad authentication: + +```curl +curl http://:8080/my-api/my-path -H 'Authorization: badtoken' +``` + +This request will trigger a successful authentication. You are using the token that's set by your Python plugin: + +```curl +curl http://:8080/my-api/my-path -H 'Authorization: 47a0c79c427728b3df4af62b9228c8ae' +``` + +#### What's Next? + +In this tutorial you learned how Tyk plugins work. For a production-level setup we suggest the following steps: + +* Configure Tyk to use your own key so that you can enforce cryptographic signature checks when loading plugin bundles, and sign your plugin bundles! +* Configure an appropriate web server and path to serve your plugin bundles. 
+ + +### Add Python Plugin To Your Gateway + +#### API settings + +To add a Python plugin to your API, you must specify the bundle name using the `custom_middleware_bundle` field: + +```{.json} +{ + "name": "Tyk Test API", + "api_id": "1", + "org_id": "default", + "definition": { + "location": "header", + "key": "version" + }, + "auth": { + "auth_header_name": "authorization" + }, + "use_keyless": true, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "expires": "3000-01-02 15:04", + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [] + } + } + } + }, + "proxy": { + "listen_path": "/quickstart/", + "target_url": "http://httpbin.org", + "strip_listen_path": true + }, + "custom_middleware_bundle": "test-bundle" +} +``` + +#### Global settings + +To enable Python plugins you need to add the following block to `tyk.conf`: + +```{.copyWrapper} +"coprocess_options": { + "enable_coprocess": true, + "python_path_prefix": "/opt/tyk-gateway" +}, +"enable_bundle_downloader": true, +"bundle_base_url": "http://dummy-bundle-server.com/bundles/", +"public_key_path": "/path/to/my/pubkey", +``` + +`enable_coprocess`: enables the rich plugins feature. + +`python_path_prefix`: Sets the path to built-in Tyk modules, this will be part of the Python module lookup path. The value used here is the default one for most installations. + +`enable_bundle_downloader`: enables the bundle downloader. + +`bundle_base_url`: is a base URL that will be used to download the bundle, in this example we have `test-bundle` specified in the API settings, Tyk will fetch the URL for your specified bundle server (in the above example): `dummy-bundle-server.com/bundles/test-bundle`. You need to create and then specify your own bundle server URL. + +`public_key_path`: sets a public key, this is used for verifying signed bundles, you may omit this if unsigned bundles are used. 
+ +### Tyk Python API methods + +Python plugins may call these Tyk API methods: + +#### store_data(key, value, ttl) + +`store_data` sets a Redis `key` with the specified `value` and `ttl`. + +#### get_data(key) + +`get_data` retrieves a Redis `key`. + +#### trigger_event(event_name, payload) + +`trigger_event` triggers an internal Tyk event, the `payload` must be a JSON object. + +#### log(msg, level) + +`log` will log a message (`msg`) using the specified `level`. + +#### log_error(*args) + +`log_error` is a shortcut for `log`, it uses the error log level. + +### Python Performance + +These are some benchmarks performed on Python plugins. Python plugins run in a standard Python interpreter, embedded inside Tyk. + +Python Performance + +Python Performance + +--- + +## Using gRPC + +### Overview + +gRPC is a very powerful framework for RPC communication across different [languages](https://www.grpc.io/docs). It was created by Google and makes heavy use of HTTP2 capabilities and the [Protocol Buffers](https://developers.google.com/protocol-buffers/) serialisation mechanism to dispatch and exchange requests between Tyk and your gRPC plugins. + +When it comes to built-in plugins, we have been able to integrate several languages like Python, Javascript & Lua in a native way: this means the middleware you write using any of these languages runs in the same process. At the time of writing, the following languages are supported: C++, Java, Objective-C, Python, Ruby, Go, C# and Node.JS. + +For supporting additional languages we have decided to integrate gRPC connections and perform the middleware operations within a gRPC server that is external to the Tyk process. Please contact us to learn more: + +
+
+
+
+Tyk has built-in support for gRPC backends, enabling you to build rich plugins using any of the gRPC supported languages. See [gRPC by language](http://www.grpc.io/docs/) for further details.
+
+#### Use Cases
+
+Deploying an external gRPC server to handle plugins provides numerous technical advantages:
+
+- Allows for independent scalability of the service from the Tyk Gateway.
+- Utilizes a custom-designed server tailored to address specific security concerns, effectively mitigating various security risks associated with native plugins.
+
+#### gRPC Plugin Architectural Overview
+
+An example architecture is illustrated below.
+
+Using gRPC for plugins
+
+Here we can see that Tyk Gateway sends requests to an external Java gRPC server to handle authentication, via a CustomAuth plugin. The flow is as follows:
+
+- Tyk receives a HTTP request.
+- Tyk serialises the request and session into a protobuf message that is dispatched to your gRPC server.
+- The gRPC server performs custom middleware operations (for example, any modification of the request object). Each plugin (Pre, PostAuthKey, Post, Response etc.) is handled as separate gRPC request.
+- The gRPC server sends the request back to Tyk.
+- Tyk proxies the request to your upstream API.
+
+#### Limitations of gRPC plugins
+
+At the time of writing the following features are currently unsupported and unavailable in the serialised request:
+- Client certificates
+- OAuth keys
+- For graphQL APIs details concerning the *max_query_depth* is unavailable
+- A request query parameter cannot be associated with multiple values
+
+#### gRPC Developer Resources
+
+The [Protocol Buffers](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto) and [bindings](https://github.com/TykTechnologies/tyk/tree/master/coprocess/bindings) provided by Tyk should be used in order for successful communication between Tyk Gateway and your gRPC plugin server.
Documentation for the protobuf messages is available in the [Rich Plugins Data Structures](/api-management/plugins/rich-plugins#rich-plugins-data-structures) page.
+
+You can generate supporting HTML documentation using the *docs* task in the [Taskfile](https://github.com/TykTechnologies/tyk/blob/master/coprocess/proto/Taskfile.yml) file of the [Tyk repository](https://github.com/TykTechnologies/tyk). This documentation explains the protobuf messages and services that allow gRPC plugins to handle a request made to the Gateway. Please refer to the README file within the proto folder of the tyk repository for further details.
+
+You may re-use the bindings that were generated for our samples or generate the bindings yourself for Go, Python and Ruby, as implemented by the *generate* task in the [Taskfile](https://github.com/TykTechnologies/tyk/blob/master/coprocess/proto/Taskfile.yml) file of the [Tyk repository](https://github.com/TykTechnologies/tyk).
+
+If you wish to generate bindings for another target language you may generate the bindings yourself. The [Protocol Buffers](https://developers.google.com/protocol-buffers/) and [gRPC documentation](http://www.grpc.io/docs) provide specific requirements and instructions for each language.
+
+#### Load Balancing Between gRPC Servers
+
+Since Tyk 5.8.3 Tyk Gateway has had the ability to load balance between multiple gRPC servers.
+
+To implement this you must first specify the address of the load balanced service using the `dns:///` (note: triple slash) [protocol](https://github.com/grpc/grpc/blob/master/doc/naming.md) in Tyk Gateway's [gRPC server address](/tyk-oss-gateway/configuration#coprocess_optionscoprocess_grpc_server) configuration (`TYK_GW_COPROCESSOPTIONS_COPROCESSGRPCSERVER`). Tyk will retrieve the list of addresses for each gRPC server from that service.
+
+You can control whether Tyk will implement load balancing using the [gRPC round robin load balancing](/tyk-oss-gateway/configuration#coprocess_optionsgrpc_round_robin_load_balancing) config (`TYK_GW_COPROCESSOPTIONS_GRPCROUNDROBINLOADBALANCING`):
+
+- If set to `true`, Tyk will balance load between the server addresses retrieved using a round robin approach.
+- If set to `false`, Tyk will implement a sticky session approach without load balancing.
+
+Note that Tyk will only query the DNS on start-up, so if you need to update the list of gRPC servers that you want Tyk to target, you must restart Tyk Gateway.
+
+If you are not load balancing, you can alternatively provide the `tcp://` address of the gRPC server in Tyk Gateway's [gRPC server address](/tyk-oss-gateway/configuration#coprocess_optionscoprocess_grpc_server) configuration (`TYK_GW_COPROCESSOPTIONS_COPROCESSGRPCSERVER`) and set [gRPC round robin load balancing](/tyk-oss-gateway/configuration#coprocess_optionsgrpc_round_robin_load_balancing) (`TYK_GW_COPROCESSOPTIONS_GRPCROUNDROBINLOADBALANCING`) to `false`.
+
+---
+
+### Getting Started: Key Concepts
+
+This document serves as a developer's guide for understanding the key concepts and practical steps for writing and configuring gRPC plugins for Tyk Gateway. It provides technical insights and practical guidance to seamlessly integrate Tyk plugins into your infrastructure through gRPC. The goal is to equip developers with the knowledge and tools needed to effectively utilize gRPC for enhancing Tyk Gateway functionalities.
+
+This comprehensive guide covers essential tasks, including:
+
+1. **Developing a gRPC Server:** Learn how to develop a gRPC server using [Tyk protocol buffers](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto). The gRPC server facilitates the execution of Tyk plugins, which offer custom middleware for various phases of the API request lifecycle.
By integrating these plugins, developers can enable Tyk Gateway with enhanced control and flexibility in managing API requests, allowing for fine-grained customization and tailored processing at each stage of the request lifecycle.
+
+2. **Configuring Tyk Gateway:** Set up Tyk Gateway to communicate with your gRPC Server and, optionally, an external secured web server hosting the gRPC plugin bundle for API configurations. Configure Tyk Gateway to fetch the bundle configured for an API from the web server, enabling seamless integration with gRPC plugins. Specify connection settings for streamlined integration.
+
+3. **API Configuration:** Customize API settings within Tyk Gateway to configure gRPC plugin utilization. Define plugin hooks directly within the API Definition or remotely via an external web server for seamless request orchestration. Tyk plugins provide custom middleware for different phases of the API request lifecycle, enhancing control and flexibility.
+
+4. **API Testing:** Test that Tyk Gateway integrates with your gRPC server for the plugins configured for your API.
+
+---
+
+#### Develop gRPC server
+
+Develop your gRPC server, using your preferred language, to handle requests from Tyk Gateway for each of the required plugin hooks. These hooks allow Tyk Gateway to communicate with your gRPC server to execute custom middleware at various stages of the API request lifecycle.
+
+##### Prerequisites
+
+The following prerequisites are necessary for developing a gRPC server that integrates with Tyk Gateway.
+
+###### Tyk gRPC Protocol Buffers
+
+A collection of [Protocol Buffer](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto) messages are available in the Tyk Gateway repository to allow Tyk Gateway to integrate with your gRPC server, requesting execution of plugin code. These messages establish a standard set of data structures that are serialised between Tyk Gateway and your gRPC Server.
Developers should consult the [Rich Plugins Data Structures](/api-management/plugins/rich-plugins#rich-plugins-data-structures) page for further details.
+
+###### Protocol Buffer Compiler
+
+The protocol buffer compiler, `protoc`, should be installed to generate the service and data structures in your preferred language(s) from the [Tyk gRPC Protocol Buffer](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto) files. Developers should consult the [installation](https://grpc.io/docs/protoc-installation/) documentation at [grpc.io](https://grpc.io/) for an explanation of how to install `protoc`.
+
+##### Generate Bindings
+
+Generate the bindings (service and data structures) for your target language using the `protoc` compiler. Tutorials are available at [protobuf.dev](https://protobuf.dev/getting-started/) for your target language.
+
+##### Implement service
+
+Your gRPC server should implement the *Dispatcher* service to enable Tyk Gateway to integrate with your gRPC server. The Protocol Buffer definition for the *Dispatcher* service is listed below:
+
+```protobuf
+service Dispatcher {
+  rpc Dispatch (Object) returns (Object) {}
+  rpc DispatchEvent (Event) returns (EventReply) {}
+}
+```
+
+The *Dispatcher* service contains two RPC methods, *Dispatch* and *DispatchEvent*. Dispatch handles requests made by Tyk Gateway for each plugin configured in your API. DispatchEvent receives notification of an event.
+
+Your *Dispatch* RPC should handle the request made by Tyk Gateway, implementing custom middleware for the intended plugin hooks. Each plugin hook allows Tyk Gateway to communicate with your gRPC server to execute custom middleware at various stages of the API request lifecycle, such as Pre, PostAuth, Post, Response etc. 
The Tyk Protocol Buffers define the [HookType](https://github.com/TykTechnologies/tyk/blob/master/coprocess/proto/coprocess_common.proto) enumeration to inspect the type of the intended gRPC plugin associated with the request. This is accessible as an attribute on the *Object* message, e.g. *object_message_instance.hook_type*.
+
+##### Developer resources
+
+Consult the [Tyk protocol buffers](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto) for the definition of the service and data structures that enable integration of Tyk gateway with your gRPC server. Tyk provides pre-generated [bindings](https://github.com/TykTechnologies/tyk/tree/master/coprocess/bindings) for C++, Java, Python and Ruby.
+
+Example tutorials are available that explain how to generate the protobuf bindings and implement a server for [Java](/api-management/plugins/rich-plugins#create-a-request-transformation-plugin-with-java), [.NET](/api-management/plugins/rich-plugins#create-custom-auth-plugin-with-dotnet) and [NodeJS](/api-management/plugins/rich-plugins#create-custom-auth-plugin-with-dotnet).
+
+Tyk Github repositories are also available with examples for [Ruby](https://github.com/TykTechnologies/tyk-plugin-demo-ruby) and [C#/.NET](https://github.com/TykTechnologies/tyk-plugin-demo-dotnet).
+
+---
+
+#### Configure Tyk Gateway
+
+Configure Tyk Gateway to issue requests to your gRPC server and optionally, specify the URL of the web server that will serve plugin bundles.
+
+##### Configure gRPC server
+
+Modify the root of your `tyk.conf` file to include the *coprocess_options* section, similar to that listed below:
+
+```yaml
+"coprocess_options": {
+  "enable_coprocess": true,
+  "coprocess_grpc_server": "tcp://127.0.0.1:5555",
+  "grpc_authority": "localhost",
+  "grpc_recv_max_size": 100000000,
+  "grpc_send_max_size": 100000000
+},
+```
+
+A gRPC server can be configured under the `coprocess_options` section as follows:
+
+- `enable_coprocess`: Enables the rich plugins feature. 
+- `coprocess_grpc_server`: Specifies the gRPC server URL, in this example we're using TCP. Tyk will attempt a connection on startup and keep reconnecting in case of failure. +- `grpc_recv_max_size`: Specifies the message size supported by the gateway gRPC client, for receiving gRPC responses. +- `grpc_send_max_size`: Specifies the message size supported by the gateway gRPC client for sending gRPC requests. +- `grpc_authority`: The `authority` header value, defaults to `localhost` if omitted. Allows configuration according to [RFC 7540](https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.3). + +When using gRPC plugins, Tyk acts as a gRPC client and dispatches requests to your gRPC server. gRPC libraries usually set a default maximum size, for example, the official gRPC Java library establishes a 4 +MB message size [https://jbrandhorst.com/post/grpc-binary-blob-stream/](https://jbrandhorst.com/post/grpc-binary-blob-stream/). + +Configuration parameters are available for establishing a message size in both directions (send and receive). For most use cases and especially if you're dealing with multiple hooks, where the same request object is dispatched, it is recommended to set both values to the same size. + +##### Configure Web server (optional) + +Tyk Gateway can be configured to download the gRPC plugin configuration for an API from a web server. For further details related to the concept of bundling plugins please refer to [plugin bundles](/api-management/plugins/overview#plugin-bundles). + +```yaml +"enable_bundle_downloader": true, +"bundle_base_url": "https://my-bundle-server.com/bundles/", +"public_key_path": "/path/to/my/pubkey", +``` + +The following parameters can be configured: +- `enable_bundle_downloader`: Enables the bundle downloader to download bundles from a webserver. +- `bundle_base_url`: Base URL from which to serve bundled plugins. 
+- `public_key_path`: Public key for bundle verification (optional)
+
+The `public_key_path` value is used for verifying signed bundles, you may omit this if unsigned bundles are used.
+
+---
+
+#### Configure API
+
+Plugin hooks for your APIs in Tyk can be configured either by directly specifying them in a configuration file on the Gateway server or by hosting the configuration externally on a web server. This section explains how to configure gRPC plugins for your API endpoints on the local Gateway or remotely from an external secured web server.
+
+##### Local
+
+This section provides examples for how to configure gRPC plugin hooks, locally within an API Definition. Examples are provided for Tyk Gateway and Tyk Operator.
+
+###### Tyk Gateway
+
+For configurations directly embedded within the Tyk Gateway, plugin hooks can be defined within your API Definition. An example snippet from a Tyk Classic API Definition is provided below:
+
+```yaml
+"custom_middleware": {
+  "pre": [
+    {"name": "MyPreMiddleware"}
+  ],
+  "post": [
+    {"name": "MyPostMiddleware"}
+  ],
+  "auth_check": {
+    "name": "MyAuthCheck"
+  },
+  "driver": "grpc"
+}
+```
+
+For example, a Post request plugin hook has been configured with name `MyPostMiddleware`. Before the request is sent upstream Tyk Gateway will serialize the request into an [Object protobuf message](/api-management/plugins/rich-plugins#coprocess-object) with the `hook_name` property set to `MyPostMiddleware` and the `hook_type` property set to `Post`. This message will then be dispatched to the gRPC server for processing before the request is sent upstream.
+
+
+
+Ensure the plugin driver is configured as type *grpc*. Tyk will issue a request to your gRPC server for each plugin hook that you have configured.
+
+
+
+###### Tyk Operator
+
+The examples below illustrate how to configure plugin hooks for an API Definition within Tyk Operator.
+
+Setting the `driver` configuration parameter to `grpc` instructs Tyk Gateway to issue a request to your gRPC server for each plugin hook that you have configured.
+
+**Pre plugin hook example**
+
+In this example we can see that a `custom_middleware` configuration block has been used to configure a gRPC Pre request plugin hook with name `HelloFromPre`. Before any middleware is executed Tyk Gateway will serialize the request into an [Object protobuf message](/api-management/plugins/rich-plugins#coprocess-object) with the `hook_name` property set to `HelloFromPre` and the `hook_type` property set to `Pre`. This message will then be dispatched to the gRPC server.
+
+```yaml {linenos=table,hl_lines=["14-18"],linenostart=1}
+apiVersion: tyk.tyk.io/v1alpha1
+kind: ApiDefinition
+metadata:
+  name: httpbin-grpc-pre
+spec:
+  name: httpbin-grpc-pre
+  use_keyless: true
+  protocol: http
+  active: true
+  proxy:
+    target_url: http://httpbin.default.svc:8000
+    listen_path: /httpbin-grpc-pre
+    strip_listen_path: true
+  custom_middleware:
+    driver: grpc
+    pre:
+      - name: HelloFromPre
+        path: ""
+```
+
+**Post plugin hook example**
+
+In the example we can see that a `custom_middleware` configuration block has been used to configure a gRPC Post plugin with name `HelloFromPost`.
+
+Before the request is sent upstream Tyk Gateway will serialize the request and session details into an [Object protobuf message](/api-management/plugins/rich-plugins#coprocess-object) with the `hook_name` property set to `HelloFromPost` and the `hook_type` property set to `Post`. This message will then be dispatched to the gRPC server for processing before the request is sent upstream. 
+ +```yaml {linenos=table,hl_lines=["14-18"],linenostart=1} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-grpc-post +spec: + name: httpbin-grpc-post + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.default.svc:8000 + listen_path: /httpbin-grpc-post + strip_listen_path: true + custom_middleware: + driver: grpc + post: + - name: HelloFromPost + path: "" +``` + +##### Remote + +It is possible to configure your API so that it downloads a bundled configuration of your plugins from an external webserver. The bundled plugin configuration is contained within a zip file. + +A gRPC plugin bundle is similar to the [standard bundling mechanism](/api-management/plugins/overview#plugin-bundles). The standard bundling mechanism zips the configuration and plugin source code, which will be executed by Tyk. Conversely, a gRPC plugin bundle contains only the configuration (`manifest.json`), with plugin code execution being handled independently by the gRPC server. + +Bundling a gRPC plugin requires the following steps: +- Create a `manifest.json` that contains the configuration of your plugins +- Build a zip file that bundles your plugin +- Upload the zip file to an external secured webserver +- Configure your API to download your plugin bundle + +###### Create the manifest file + +The `manifest.json` file specifies the configuration for your gRPC plugins. An example `manifest.json` is listed below: + +```yaml +{ + "file_list": [], + "custom_middleware": { + "pre": [{"name": "MyPreMiddleware"}], + "post": [{"name": "MyPostMiddleware"}], + "auth_check": {"name": "MyAuthCheck"}, + "driver": "grpc" + }, + "checksum": "", + "signature": "" +} +``` + + + +The source code files, *file_list*, are empty for gRPC plugins. Your gRPC server contains the source code for handling plugins. 
+
+
+
+###### Build plugin bundle
+
+A plugin bundle can be built using the Tyk Gateway binary and should only contain the `manifest.json` file:
+
+```bash
+tyk bundle build -output mybundle.zip -key mykey.pem
+```
+
+The example above generates a zip file, named `mybundle.zip`. The zip file is signed with key `mykey.pem`.
+
+The resulting bundle file should then be uploaded to the webserver that hosts your plugin bundles.
+
+###### Configure API
+
+####### Tyk Gateway
+
+To add a gRPC plugin to your API definition, you must specify the bundle file name within the `custom_middleware_bundle` field:
+
+```yaml
+{
+  "name": "Tyk Test API",
+  ...
++ "custom_middleware_bundle": "mybundle.zip"
+}
+```
+
+The value of the `custom_middleware_bundle` field will be used in combination with the gateway settings to construct a bundle URL. For example, if Tyk Gateway is configured with a webserver base URL of `https://my-bundle-server.com/bundles/` then an attempt would be made to download the bundle from `https://my-bundle-server.com/bundles/mybundle.zip`.
+
+####### Tyk Operator
+
+ Currently this feature is not yet documented with a Tyk Operator example for configuring an API to use plugin bundles. For further details please reach out and contact us on the [community support forum](https://community.tyk.io).
+
+---
+
+#### Test your API Endpoint
+
+It is crucial to ensure the security and reliability of your gRPC server. As the developer, it is your responsibility to verify that your gRPC server is secured and thoroughly tested with appropriate test coverage. Consider implementing unit tests, integration tests and other testing methodologies to ensure the robustness of your server's functionality and security measures. This step ensures that the Tyk Gateway properly communicates with your gRPC server and executes the custom logic defined by the plugin hooks.
+
+Test the API endpoint using tools like *Curl* or *Postman*. 
Ensure that your gRPC server is running and the gRPC plugin(s) are functioning. An example using *Curl* is listed below: + +```bash +curl -X GET https://www.your-gateway-server.com:8080/api/path +``` + +Replace `https://www.your-gateway-server.com:8080/api/path` with the actual endpoint of your API. + +--- + +#### Summary + +This guide has explained the key concepts and processes for writing gRPC plugins that integrate with Tyk Gateway. The following explanations have been given: + +- Prerequisites for developing a gRPC server for your target language. +- The *Dispatcher* service interface. +- How to configure Tyk Gateway to integrate with your gRPC server. +- How to configure Tyk Gateway with an optional external web server for fetching plugin configuration. +- How to configure gRPC plugins for your APIs. +- How to test your API integration with your gRPC server using curl. + +--- + +#### What's Next? + +- Consult the [Protocol Buffer messages](/api-management/plugins/rich-plugins#rich-plugins-data-structures) that Tyk Gateway uses when making a request to a gRPC server. +- Visit tutorial guides that explain how to implement a [Java](/api-management/plugins/rich-plugins#create-a-request-transformation-plugin-with-java), [.NET](/api-management/plugins/rich-plugins#create-custom-auth-plugin-with-dotnet) and [NodeJS](/api-management/plugins/rich-plugins#create-custom-auth-plugin-with-dotnet) gRPC server. +- Visit our [plugins hub](/api-management/plugins/overview#plugins-hub) to explore further gRPC development examples and resources. + +--- + + + + +### Getting Started: Creating A Python gRPC Server + +In the realm of API integration, establishing seamless connections between services is paramount. + +Understanding the fundamentals of gRPC server implementation is crucial, especially when integrating with a Gateway solution like Tyk. 
This guide aims to provide practical insights into this process, starting with the basic principles of how to implement a Python gRPC server that integrates with Tyk Gateway. + +#### Objectives + +By the end of this guide, you will be able to implement a gRPC server that will integrate with Tyk Gateway, setting the stage for further exploration in subsequent parts: + +- Establishing the necessary tools, Python libraries and gRPC service definition for implementing a gRPC server that integrates with Tyk Gateway. +- Developing a basic gRPC server that echoes the request payload to the console, showcasing the core principles of integration. +- Configuring Tyk Gateway to interact with our gRPC server, enabling seamless communication between the two services. + +Before implementing our first gRPC server it is first necessary to understand the service interface that defines how Tyk Gateway integrates with a gRPC server. + + +#### Tyk Dispatcher Service + +The *Dispatcher* service, defined in the [coprocess_object.proto](https://github.com/TykTechnologies/tyk/blob/master/coprocess/proto/coprocess_object.proto) file, contains the *Dispatch* RPC method, invoked by Tyk Gateway to request remote execution of gRPC plugins. Tyk Gateway dispatches accompanying data relating to the original client request and session. The service definition is listed below: + +```protobuf +service Dispatcher { + rpc Dispatch (Object) returns (Object) {} + rpc DispatchEvent (Event) returns (EventReply) {} +} +``` + +On the server side, we will implement the *Dispatcher* service methods and a gRPC server to handle requests from Tyk Gateway. The gRPC infrastructure decodes incoming requests, executes service methods and encodes service responses. + +Before we start developing our gRPC server we need to setup our development environment with the supporting libraries and tools. 
+
+
+#### Prerequisites
+
+Firstly, we need to download the [Tyk Protocol Buffers](https://github.com/TykTechnologies/tyk/tree/master/coprocess/proto) and install the Python protoc compiler.
+
+We are going to use the *protoc* compiler to generate the supporting classes and data structures to implement the *Dispatcher* service.
+
+
+##### Tyk Protocol Buffers
+
+Issue the following command to download and extract the Tyk Protocol Buffers from the Tyk GitHub repository:
+
+```bash
+curl -sL "https://github.com/TykTechnologies/tyk/archive/master.tar.gz" -o tyk.tar.gz && \
+  mkdir tyk && \
+  tar -xzvf tyk.tar.gz --strip-components=1 -C tyk && \
+  mv tyk/coprocess/proto/* . && \
+  rm -r tyk tyk.tar.gz
+```
+
+##### Install Dependencies
+
+We are going to setup a Python virtual environment and install some supporting dependencies. Assuming that you have Python [virtualenv](https://virtualenv.pypa.io/en/latest/installation.html) already installed, then issue the following commands to setup a Python virtual environment containing the grpcio and grpcio-tools libraries:
+
+```bash
+python3 -m venv .venv
+source .venv/bin/activate
+pip install --upgrade pip
+pip install grpcio grpcio-tools grpcio-reflection
+```
+
+The [grpcio](https://pypi.org/project/grpcio/) library offers essential functionality to support core gRPC features such as message serialisation and deserialisation. The [grpcio-tools](https://pypi.org/project/grpcio-tools/) library provides the Python *protoc* compiler that we will use to generate the supporting classes and data structures to implement our gRPC server. The [grpcio-reflection](https://pypi.org/project/grpcio-reflection/) library allows clients to query information about the services and methods provided by a gRPC server at runtime. It enables clients to dynamically discover available services, their RPC methods, in addition to the message types and field names associated with those methods. 
+ + +##### Install grpcurl + +Follow the [installation instructions](https://github.com/fullstorydev/grpcurl?tab=readme-ov-file#installation) to install grpcurl. We will use grpcurl to send test requests to our gRPC server. + + +##### Generate Python Bindings + +We are now able to generate the Python classes and data structures to allow us to implement our gRPC server. To accomplish this we will use the Python *protoc* command as listed below: + +```bash +python -m grpc_tools.protoc --proto_path=. --python_out=. --grpc_python_out=. *.proto +``` + +This compiles the Protocol Buffer files (*.proto) from the current working directory and generates the Python classes representing the Protocol Buffer messages and services. A series of *.py* files should now exist in the current working directory. We are interested in the *coprocess_object_pb2_grpc.py* file, containing a default implementation of *Tyk’s Dispatcher* service. + +Inspect the generated Python file, *coprocess_object_pb2_grpc.py*, containing the *DispatcherServicer* class: + +```python +class DispatcherServicer(object): + """ GRPC server interface, that must be implemented by the target language """ + def Dispatch(self, request, context): + """ Accepts and returns an Object message """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def DispatchEvent(self, request, context): + """ Dispatches an event to the target language """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') +``` + +This superclass contains a default stub implementation for the **Dispatch** and **DispatchEvent** RPC methods, each defining request and context parameters: + +The *request* parameter allows our server to access the message payload sent by Tyk Gateway. 
We can use this data, pertaining to the request and session, to process and generate a response. + +The *context* parameter provides additional information and functionalities related to the RPC call, such as timeout limits, cancelation signals etc. This is a [grpc.ServicerContext](https://grpc.github.io/grpc/python/grpc.html#grpc.ServicerContext) or a [grpc.aio.ServicerContext](https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.ServicerContext), object depending upon whether a synchronous or AsyncIO gRPC server is implemented. + +In the next step we will implement a subclass that will handle requests made by Tyk Gateway for remote execution of custom plugins. + + +#### Implement Dispatcher Service + +We will now develop the *Dispatcher* service, adding implementations of the *Dispatch* and *DispatchEvent* methods, to allow our gRPC server to integrate with Tyk Gateway. Before we continue, create a file, *async_server.py*, within the same folder as the generated Protocol Buffer (.proto) files. + + +##### Dispatch + +Our implementation of the Dispatch RPC method will deserialize the request payload and output to the console as JSON format. This serves as a useful development and debugging aid, allowing inspection of the request and session state dispatched by Tyk Gateway to our gRPC server. + +Copy and paste the following source code into the *async_server.py* file. Notice that we have used type hinting to aid readability. The type hints are located within the type hint files (.pyi) we generated with the protoc compiler. 
+ + +```python +import asyncio +import grpc +import json +import signal +import logging +from google.protobuf.json_format import MessageToJson +from grpc_reflection.v1alpha import reflection +import coprocess_object_pb2_grpc +import coprocess_object_pb2 +from coprocess_common_pb2 import HookType +from coprocess_session_state_pb2 import SessionState +class PythonDispatcher(coprocess_object_pb2_grpc.DispatcherServicer): + async def Dispatch( + self, object: coprocess_object_pb2.Object, context: grpc.aio.ServicerContext + ) -> coprocess_object_pb2.Object: + logging.info(f"STATE for {object.hook_name}\n{MessageToJson(object)}\n") + if object.hook_type == HookType.Pre: + logging.info(f"Pre plugin name: {object.hook_name}") + logging.info(f"Activated Pre Request plugin from API: {object.spec.get('APIID')}") + elif object.hook_type == HookType.CustomKeyCheck: + logging.info(f"CustomAuth plugin: {object.hook_name}") + logging.info(f"Activated CustomAuth plugin from API: {object.spec.get('APIID')}") + elif object.hook_type == HookType.PostKeyAuth: + logging.info(f"PostKeyAuth plugin name: {object.hook_name}") + logging.info(f"Activated PostKeyAuth plugin from API: {object.spec.get('APIID')}") + elif object.hook_type == HookType.Post: + logging.info(f"Post plugin name: {object.hook_name}") + logging.info(f"Activated Post plugin from API: {object.spec.get('APIID')}") + elif object.hook_type == HookType.Response: + logging.info(f"Response plugin name: {object.hook_name}") + logging.info(f"Activated Response plugin from API: {object.spec.get('APIID')}") + logging.info("--------\n") + return object +``` + +Our *Dispatch* RPC method accepts the two parameters, *object* and *context*. The object parameter allows us to inspect the state and session of the request object dispatched by Tyk Gateway, via accessor methods. The *context* parameter can be used to set timeout limits etc. associated with the RPC call. 
+
+The important takeaways from the source code listing above are:
+
+- The [MessageToJson](https://googleapis.dev/python/protobuf/latest/google/protobuf/json_format.html#google.protobuf.json_format.MessageToJson) function is used to deserialize the request payload as JSON.
+- In the context of custom plugins we access the *hook_type* and *hook_name* attributes of the *Object* message to determine which plugin to execute.
+- The ID of the API associated with the request is accessible from the spec dictionary, *object.spec.get('APIID')*.
+
+An implementation of the *Dispatch* RPC method must return the object payload received from Tyk Gateway. The payload can be modified by the service implementation, for example to add or remove headers and query parameters before the request is sent upstream.
+
+
+##### DispatchEvent
+
+Our implementation of the *DispatchEvent* RPC method will deserialize and output the event payload as JSON. Append the following source code to the *async_server.py* file:
+
+```python
+    async def DispatchEvent(
+        self, event: coprocess_object_pb2.Event, context: grpc.aio.ServicerContext
+    ) -> coprocess_object_pb2.EventReply:
+        event = json.loads(event.payload)
+        logging.info(f"RECEIVED EVENT: {event}")
+        return coprocess_object_pb2.EventReply()
+```
+
+The *DispatchEvent* RPC method accepts the two parameters, *event* and *context*. The event parameter allows us to inspect the payload of the event dispatched by Tyk Gateway. The context parameter can be used to set timeout limits etc. associated with the RPC call.
+
+The important takeaways from the source code listing above are:
+
+- The event data is accessible from the *payload* attribute of the event parameter.
+- An implementation of the *DispatchEvent* RPC method must return an instance of *coprocess_object_pb2.EventReply*.
+
+
+#### Create gRPC Server
+
+Finally, we will implement an AsyncIO gRPC server to handle requests from Tyk Gateway to the *Dispatcher* service. 
We will add functions to start and stop our gRPC server. Finally, we will use *grpcurl* to issue a test payload to our gRPC server to test that it is working.
+
+
+##### Develop gRPC Server
+
+Append the following source code from the listing below to the *async_server.py* file:
+
+```python
+async def serve() -> None:
+    server = grpc.aio.server()
+    coprocess_object_pb2_grpc.add_DispatcherServicer_to_server(
+        PythonDispatcher(), server
+    )
+    listen_addr = "[::]:50051"
+    SERVICE_NAMES = (
+        coprocess_object_pb2.DESCRIPTOR.services_by_name["Dispatcher"].full_name,
+        reflection.SERVICE_NAME,
+    )
+
+    reflection.enable_server_reflection(SERVICE_NAMES, server)
+    server.add_insecure_port(listen_addr)
+
+    logging.info("Starting server on %s", listen_addr)
+
+    await server.start()
+    await server.wait_for_termination()
+
+async def shutdown_server(server) -> None:
+    logging.info("Shutting down server...")
+    await server.stop(None)
+```
+
+The *serve* function starts the gRPC server, listening for requests on port 50051 with reflection enabled.
+
+Clients can use reflection to list available services, obtain their RPC methods and retrieve their message types and field names dynamically. This is particularly useful for tooling and debugging purposes, allowing clients to discover server capabilities without prior knowledge of the service definitions.
+
+
+
+**note**
+
+A descriptor is a data structure that describes the structure of the messages, services, enums and other elements defined in a .proto file. The purpose of the descriptor is primarily metadata: it provides information about the types and services defined in the protocol buffer definition. The *coprocess_object_pb2.py* file that we generated using *protoc* contains a DESCRIPTOR field that we can use to retrieve this metadata. 
For further details consult the documentation for the Google's protobuf [FileDescriptor](https://googleapis.dev/python/protobuf/latest/google/protobuf/descriptor.html#google.protobuf.descriptor.FileDescriptor.services_by_name) class. + + + +The *shutdown_server* function stops the gRPC server via the *stop* method of the server instance. + +The key takeaways from the source code listing above are: + +- An instance of a gRPC server is created using *grpc.aio.server()*. +- A service implementation should be registered with the gRPC server. We register our *PythonDispatcher* class via *coprocess_object_pb2_grpc.add_DispatcherServicer_to_server(PythonDispatcher(), server)*. +- Reflection can be enabled to allow clients to dynamically discover the services available at a gRPC server. We enabled our *Dispatcher* service to be discovered via *reflection.enable_server_reflection(SERVICE_NAMES, server)*. SERVICE_NAMES is a tuple containing the full names of two gRPC services: the *Dispatcher* service obtained by using the DESCRIPTOR field within the *coprocess_object_pb2* module and the other being the standard reflection service. +- The server instance should be started via invoking and awaiting the *start* and *wait_for_termination* methods of the server instance. +- A port may be configured for the server. In this example we configured an insecure port of 50051 on the server instance via the [add_insecure_port function](https://grpc.github.io/grpc/python/grpc.html#grpc.Server.add_insecure_port). It is also possible to add a secure port via the [add_secure_port](https://grpc.github.io/grpc/python/grpc.html#grpc.Server.add_secure_port) method of the server instance, which accepts the port number in addition to an SSL certificate and key to enable TLS encryption. +- The server instance can be stopped via its stop method. + +Finally, we will allow our server to terminate upon receipt of SIGTERM and SIGINT signals. 
To achieve this, append the source code listed below to the *async_server.py* file. + +```python +def handle_sigterm(sig, frame) -> None: + asyncio.create_task(shutdown_server(server)) + +async def handle_sigint() -> None: + loop = asyncio.get_running_loop() + for sig in (signal.SIGINT, signal.SIGTERM): + loop.add_signal_handler(sig, loop.stop) + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + server = None + signal.signal(signal.SIGTERM, handle_sigterm) + try: + asyncio.get_event_loop().run_until_complete(serve()) + except KeyboardInterrupt: + pass +``` + + +##### Start gRPC Server + +Issue the following command to start the gRPC server: + +```bash +python3 -m async_server +``` + +A message should be output on the console, displaying the port number and confirming that the gRPC server has started. + + +##### Test gRPC Server + +To test our gRPC server is working, issue test requests to the *Dispatch* and *DispatchEvent* methods, using *grpcurl*. + + +####### Send Dispatch Request + +Use the *grpcurl* command to send a test dispatch request to our gRPC server: + +```bash +grpcurl -plaintext -d '{ + "hookType": "Pre", + "hookName": "MyPreCustomPluginForBasicAuth", + "request": { + "headers": { + "User-Agent": "curl/8.1.2", + "Host": "tyk-gateway.localhost:8080", + "Authorization": "Basic ZGV2QHR5ay5pbzpwYXN0cnk=", + "Accept": "*/*" + }, + "url": "/basic-authentication-valid/get", + "returnOverrides": { + "responseCode": -1 + }, + "method": "GET", + "requestUri": "/basic-authentication-valid/get", + "scheme": "https" + }, + "spec": { + "bundle_hash": "d41d8cd98f00b204e9800998ecf8427e", + "OrgID": "5e9d9544a1dcd60001d0ed20", + "APIID": "04e911d3012646d97fcdd6c846fafc4b" + } +}' localhost:50051 coprocess.Dispatcher/Dispatch +``` + +Inspect the console output of your gRPC server. It should echo the payload that you sent in the request. 
+ + +####### Send DispatchEvent Request + +Use the grpcurl command to send a test event payload to our gRPC server: + +```bash +grpcurl -plaintext -d '{"payload": "{\"event\": \"test\"}"}' localhost:50051 coprocess.Dispatcher/DispatchEvent +``` + +Inspect the console output of your gRPC server. It should display a log similar to that shown below: + +```bash +INFO:root:RECEIVED EVENT: {'event': 'test'} +``` + +The response received from the server should be an empty event reply, similar to that shown below: + +```bash +grpcurl -plaintext -d '{"payload": "{\"event\": \"test\"}"}' localhost:50051 coprocess.Dispatcher/DispatchEvent +{} +``` + +At this point we have tested, independently of Tyk Gateway, that our gRPC Server can handle an example request payload for gRPC plugin execution. In the next section we will create a test environment for testing that Tyk Gateway integrates with our gRPC server for API requests. + + +#### Configure Test Environment + +Now that we have implemented and started a gRPC server, Tyk Gateway needs to be configured to integrate with it. To achieve this we will enable the coprocess feature and configure the URL of the gRPC server. + +We will also create an API so that we can test that Tyk Gateway integrates with our gRPC server. + + +##### Configure Tyk Gateway + +Within the root of the *tyk.conf* file, add the following configuration, replacing host and port with values appropriate for your environment: + +```yaml +"coprocess_options": { + "enable_coprocess": true, + "coprocess_grpc_server": "tcp://host:port" +} +``` + +Alternatively, the following environment variables can be set in your .env file: + +```bash +TYK_GW_COPROCESSOPTIONS_ENABLECOPROCESS=true +TYK_GW_COPROCESSOPTIONS_COPROCESSGRPCSERVER=tcp://host:port +``` + +Replace host and port with values appropriate for your environment. 
+ + +##### Configure API + +Before testing our gRPC server we will create and configure an API with 2 plugins: + +- **Pre Request**: Named *MyPreRequestPlugin*. +- **Response**: Named *MyResponsePlugin* and configured so that Tyk Gateway dispatches the session state with the request. + +Each plugin will be configured to use the *grpc* plugin driver. + +Tyk Gateway will forward details of an incoming request to the gRPC server, for each of the configured API plugins. + + +####### Tyk Classic API + +gRPC plugins can be configured within the *custom_middleware* section of the Tyk Classic ApiDefinition, as shown in the listing below: + +```json +{ + "created_at": "2024-03-23T12:49:52Z", + "api_model": {}, + "api_definition": { + ... + ... + "custom_middleware": { + "pre": [ + { + "disabled": false, + "name": "MyPreRequestPlugin", + "path": "", + "require_session": false, + "raw_body_only": false + } + ], + "post": [], + "post_key_auth": [], + "auth_check": { + "disabled": false, + "name": "", + "path": "", + "require_session": false, + "raw_body_only": false + }, + "response": [ + { + "disabled": false, + "name": "MyResponsePlugin", + "path": "", + "require_session": true, + "raw_body_only": false + } + ], + "driver": "grpc", + "id_extractor": { + "disabled": false, + "extract_from": "", + "extract_with": "", + "extractor_config": {} + } + } + } +} +``` + +In the above listing, the plugin driver parameter has been configured with a value of *grpc*. Two plugins are configured within the *custom_middleware* section: a *Pre Request* plugin and a *Response* plugin. + +The *Response* plugin is configured with *require_session* enabled, so that Tyk Gateway will send details for the authenticated key / user with the gRPC request. Note, this is not configured for *Pre Request* plugins that are triggered before authentication in the request lifecycle. 
+ + +####### Tyk OAS API + +To quickly get started, a Tyk OAS API schema can be created by importing the infamous [pet store](https://petstore3.swagger.io/api/v3/openapi.json) OAS schema. Then the [findByStatus](https://petstore3.swagger.io/api/v3/pet/findByStatus?status=available) endpoint can be used for testing. + +The resulting Tyk OAS API Definition contains the OAS JSON schema with an *x-tyk-api-gateway* section appended, as listed below. gRPC plugins can be configured within the middleware section of the *x-tyk-api-gateway* that is appended at the end of the OAS schema: + +```yaml +"x-tyk-api-gateway": { + "info": { + "id": "6e2ae9b858734ea37eb772c666517f55", + "dbId": "65f457804773a600011af41d", + "orgId": "5e9d9544a1dcd60001d0ed20", + "name": "Swagger Petstore - OpenAPI 3.0 Custom Authentication", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore3.swagger.io/api/v3/" + }, + "server": { + "listenPath": { + "value": "/custom_auth", + "strip": true + }, + "authentication": { + "enabled": true, + "custom": { + "enabled": true, + "header": { + "enabled": false, + "name": "Authorization" + } + } + } + }, + "middleware": { + "global": { + "pluginConfig": { + "driver": "grpc" + } + }, + "cors": { + "enabled": false, + "maxAge": 24, + "allowedHeaders": [ + "Accept", + "Content-Type", + "Origin", + "X-Requested-With", + "Authorization" + ], + "allowedOrigins": [ + "*" + ], + "allowedMethods": [ + "GET", + "HEAD", + "POST" + ] + }, + "prePlugin": { + "api-management/plugins/overview#": [ + { + "enabled": true, + "functionName": "MyPreRequestPlugin", + "path": "" + } + ] + }, + "responsePlugin": { + "api-management/plugins/overview#": [ + { + "enabled": true, + "functionName": "MyResponsePlugin", + "path": "", + "requireSession": true + } + ] + } + } +} +``` + +In the above listing, the plugin driver parameter has been set to *grpc*. 
Two plugins are configured within the middleware section: a *Pre Request* plugin and a *Response* plugin. + +The *Response* plugin is configured with *requireSession* enabled, so that Tyk Gateway will send details for the authenticated key / user with the gRPC request. Note, this is not configurable for *Pre Request* plugins that are triggered before authentication in the request lifecycle. + +Tyk Gateway will forward details of an incoming request to the gRPC server, for each plugin. + + +#### Test API + +We have implemented and configured a gRPC server to integrate with Tyk Gateway. Furthermore, we have created an API that has been configured with two gRPC plugins: a *Pre Request* and *Response* plugin. + +When we issue a request to our API and observe the console output of our gRPC server we should see a JSON representation of the request headers etc. echoed in the terminal. + +Issue a request for your API in the terminal window. For example: + +```bash +curl -L http://localhost:8080/grpc-http-bin +``` + +Observe the console output of your gRPC server. Tyk Gateway should have dispatched two requests to your gRPC server; a request for the *Pre Request* plugin and a request for the *Response* plugin. + +The gRPC server we implemented echoes a JSON representation of the request payload dispatched by Tyk Gateway. + +Note that this is a useful feature for learning how to develop gRPC plugins and understanding the structure of the request payload dispatched by Tyk Gateway to the gRPC server. However, in production environments care should be taken to avoid inadvertently exposing sensitive data such as secrets in the session. + + +#### Summary + +In this guide, we've delved into the integration of a Python gRPC server with Tyk Gateway. + +We have explained how to implement a Python gRPC server and equipped developers with the necessary tools, knowledge and capabilities to effectively utilize Tyk Gateway through gRPC services. 
+ +The following essential groundwork has been covered: + +- Setting up tools, libraries and service definitions for the integration. +- Developing a basic gRPC server with functionality to echo the request payload, received from Tyk Gateway, in JSON format. +- Configuring Tyk Gateway for seamless communication with our gRPC server. + + +### Create a Request Transformation Plugin with Java + +This tutorial will guide you through the creation of a gRPC-based Java plugin for Tyk. +Our plugin will inject a header into the request before it gets proxied upstream. For additional information about gRPC, check the official documentation [here](https://grpc.io/docs/guides/index.html). + +The sample code that we'll use implements a request transformation plugin using Java and uses the proper gRPC bindings generated from our Protocol Buffers definition files. + +#### Requirements + +- Tyk Gateway: This can be installed using standard package management tools like Yum or APT, or from source code. See [here][1] for more installation options. +- The Tyk CLI utility, which is bundled with our RPM and DEB packages, and can be installed separately from [https://github.com/TykTechnologies/tyk-cli][2]. +- In Tyk 2.8 the Tyk CLI is part of the gateway binary, you can find more information by running "tyk help bundle". +- Gradle Build Tool: https://gradle.org/install/. +- gRPC tools: https://grpc.io/docs/quickstart/csharp.html#generate-grpc-code +- Java JDK 7 or higher. + +#### Create the Plugin + +##### Setting up the Java Project + +We will use the Gradle build tool to generate the initial files for our project: + +```bash +cd ~ +mkdir tyk-plugin +cd tyk-plugin +gradle init +``` + +We now have a `tyk-plugin` directory containing the basic skeleton of our application. 
+ +Add the following to `build.gradle` + +```{.copyWrapper} +buildscript { + repositories { + jcenter() + } + dependencies { + classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.1' + } +} + +plugins { + id "com.google.protobuf" version "0.8.1" + id "java" + id "application" + id "idea" +} + +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:3.3.0" + } + plugins { + grpc { + artifact = 'io.grpc:protoc-gen-grpc-java:1.5.0' + } + } + generateProtoTasks { + all()*.plugins { + grpc {} + } + } + generatedFilesBaseDir = "$projectDir/src/generated" +} + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 + +mainClassName = "com.testorg.testplugin.PluginServer" + +repositories { + mavenCentral() +} + +dependencies { + compile 'io.grpc:grpc-all:1.5.0' +} + +idea { + module { + sourceDirs += file("${projectDir}/src/generated/main/java"); + sourceDirs += file("${projectDir}/src/generated/main/grpc"); + } +} +``` + +##### Create the Directory for the Server Class + +```bash +cd ~/tyk-plugin +mkdir -p src/main/java/com/testorg/testplugin +``` + +##### Install the gRPC Tools + +We need to download the Tyk Protocol Buffers definition files, these files contains the data structures used by Tyk. See [Data Structures](/api-management/plugins/rich-plugins#rich-plugins-data-structures) for more information: + +```bash +cd ~/tyk-plugin +git clone https://github.com/TykTechnologies/tyk +mv tyk/coprocess/proto src/main/proto +``` + +##### Generate the Bindings + +To generate the Protocol Buffers bindings we use the Gradle build task: + +```bash +gradle build +``` + +If you need to customize any setting related to the bindings generation step, check the `build.gradle` file. + +##### Implement Server + +We need to implement two classes: one class will contain the request dispatcher logic and the actual middleware implementation. The other one will implement the gRPC server using our own dispatcher. 
+ +From the `~/tyk-plugin/src/main/java/com/testorg/testplugin` directory, create a file named `PluginDispatcher.java` with the following code: + +```java +package com.testorg.testplugin; + +import coprocess.DispatcherGrpc; +import coprocess.CoprocessObject; + +public class PluginDispatcher extends DispatcherGrpc.DispatcherImplBase { + + @Override + public void dispatch(CoprocessObject.Object request, + io.grpc.stub.StreamObserver responseObserver) { + CoprocessObject.Object modifiedRequest = null; + + switch (request.getHookName()) { + case "MyPreMiddleware": + modifiedRequest = MyPreHook(request); + default: + // Do nothing, the hook name isn't implemented! + } + + // Return the modified request (if the transformation was done): + if (modifiedRequest != null) { + responseObserver.onNext(modifiedRequest); + }; + + responseObserver.onCompleted(); + } + + CoprocessObject.Object MyPreHook(CoprocessObject.Object request) { + CoprocessObject.Object.Builder builder = request.toBuilder(); + builder.getRequestBuilder().putSetHeaders("customheader", "customvalue"); + return builder.build(); + } +} +``` + +In the same directory, create a file named `PluginServer.java` with the following code. 
This is the server implementation: + +```java +package com.testorg.testplugin; + +import coprocess.DispatcherGrpc; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class PluginServer { + + private static final Logger logger = Logger.getLogger(PluginServer.class.getName()); + static Server server; + static int port = 5555; + + public static void main(String[] args) throws IOException, InterruptedException { + System.out.println("Initializing gRPC server."); + + // Our dispatcher is instantiated and attached to the server: + server = ServerBuilder.forPort(port) + .addService(new PluginDispatcher()) + .build() + .start(); + + blockUntilShutdown(); + + } + + static void blockUntilShutdown() throws InterruptedException { + if (server != null) { + server.awaitTermination(); + } + } +} +``` + +To run the gRPC server we can use the following command: + +```bash +cd ~/tyk-plugin +gradle runServer +``` + +The gRPC server will listen on port 5555 (as defined in `PluginServer.java`). In the next steps we'll setup the plugin bundle and modify Tyk to connect to our gRPC server. + + +#### Bundle the Plugin + +We need to create a manifest file within the `tyk-plugin` directory. This file contains information about our plugin and how we expect it to interact with the API that will load it. This file should be named `manifest.json` and needs to contain the following: + +```json +{ + "custom_middleware": { + "driver": "grpc", + "pre": [{ + "name": "MyPreMiddleware" + }] + } +} +``` + +- The `custom_middleware` block contains the middleware settings like the plugin driver we want to use (`driver`) and the hooks that our plugin will expose. We use the `pre` hook for this tutorial. For other hooks see [here](/api-management/plugins/rich-plugins#coprocess-dispatcher---hooks). 
+- The `name` field references the name of the function that we implemented in our plugin code - `MyPreMiddleware`. This will be handled by our dispatcher gRPC method in `PluginServer.java`. + +To bundle our plugin run the following command in the `tyk-plugin` directory. Check your tyk-cli install path first: + +```bash +/opt/tyk-gateway/utils/tyk-cli bundle build -y +``` + +For Tyk 2.8 use: +```bash +/opt/tyk-gateway/bin/tyk bundle build -y +``` + +A plugin bundle is a packaged version of the plugin. It may also contain a cryptographic signature of its contents. The `-y` flag tells the Tyk CLI tool to skip the signing process in order to simplify the flow of this tutorial. + +For more information on the Tyk CLI tool, see [here](/api-management/plugins/overview#plugin-bundles). + +You should now have a `bundle.zip` file in the `tyk-plugin` directory. + +#### Publish the Plugin + +To publish the plugin, copy or upload `bundle.zip` to a local web server like Nginx, or Apache or storage like Amazon S3. For this tutorial we'll assume you have a web server listening on `localhost` and accessible through `http://localhost`. + + + +#### What's Next? + +In this tutorial we learned how Tyk gRPC plugins work. For a production-level setup we suggest the following: + +- Configure an appropriate web server and path to serve your plugin bundles. + +[1]: /tyk-self-managed/install +[2]: https://github.com/TykTechnologies/tyk-cli +[3]: /img/dashboard/system-management/api_settings.png +[4]: /img/dashboard/system-management/plugin_options.png + +### Create Custom Authentication Plugin with .NET + + +This tutorial will guide you through the creation of a custom authentication plugin for Tyk with a gRPC based plugin with .NET and C#. For additional information check the official gRPC [documentation](https://grpc.io/docs/guides/index.html). 
+ +The sample code that we’ll use implements a very simple authentication layer using .NET and the proper gRPC bindings generated from our Protocol Buffers definition files. + +Using gRPC for plugins + +#### Requirements + +- Tyk Gateway: This can be installed using standard package management tools like Yum or APT, or from source code. See [here][1] for more installation options. +- The Tyk CLI utility, which is bundled with our RPM and DEB packages, and can be installed separately from [https://github.com/TykTechnologies/tyk-cli][2] +- In Tyk 2.8 the Tyk CLI is part of the gateway binary, you can find more information by running "tyk help bundle". +- .NET Core for your OS: https://www.microsoft.com/net/core +- gRPC tools: https://grpc.io/docs/quickstart/csharp.html#generate-grpc-code + +#### Create the Plugin + +##### Create .NET Project + +We use the .NET CLI tool to generate the initial files for our project: + +```bash +cd ~ +dotnet new console -o tyk-plugin +``` + +We now have a `tyk-plugin` directory containing the basic skeleton of a .NET application. + +From the `tyk-plugin` directory we need to install a few packages that the gRPC server requires: + +```bash +dotnet add package Grpc --version 1.6.0 +dotnet add package System.Threading.ThreadPool --version 4.3.0 +dotnet add package Google.Protobuf --version 3.4.0 +``` + +- The `Grpc` package provides base code for our server implementation. +- The `ThreadPool` package is used by `Grpc`. +- The `Protobuf` package will be used by our gRPC bindings. + +##### Install the gRPC Tools + +We need to install the gRPC tools to generate the bindings. We recommend you follow the official guide here: https://grpc.io/docs/quickstart/csharp.html#generate-grpc-code. 
+ +Run the following Commands (both MacOS and Linux): + +```bash +cd ~/tyk-plugin +temp_dir=packages/Grpc.Tools.1.6.x/tmp +curl_url=https://www.nuget.org/api/v2/package/Grpc.Tools/ +mkdir -p $temp_dir && cd $temp_dir && curl -sL $curl_url > tmp.zip; unzip tmp.zip && cd .. && cp -r tmp/tools . && rm -rf tmp && cd ../.. +chmod -Rf +x packages/Grpc.Tools.1.6.x/tools/ +``` + +Then run the following, depending on your OS: + +**MacOS (x64)** + +```bash +export GRPC_TOOLS=packages/Grpc.Tools.1.6.x/tools/macosx_x64 +``` + +**Linux (x64)** + +```bash +export GRPC_TOOLS=packages/Grpc.Tools.1.6.x/tools/linux_x64 +``` + +The `GRPC_TOOLS` environment variable will point to the appropriate GrpcTools path that matches our operating system and architecture. The last step is to export a variable for the `protoc` program; this is the main program used to generate bindings: + +```bash +export GRPC_PROTOC=$GRPC_TOOLS/protoc +``` + +Now that we can safely run `protoc`, we can download the Tyk Protocol Buffers definition files. These files contain the data structures used by Tyk. See [Data Structures](/api-management/plugins/rich-plugins#rich-plugins-data-structures) for more information: + +```bash +cd ~/tyk-plugin +git clone https://github.com/TykTechnologies/tyk +``` + +##### Generate the bindings + +To generate the bindings, we create an empty directory and run the `protoc` tool using the environment variable that was set before: + +```bash +mkdir Coprocess +$GRPC_PROTOC -I=tyk/coprocess/proto --csharp_out=Coprocess --grpc_out=Coprocess --plugin=protoc-gen-grpc=$GRPC_TOOLS/grpc_csharp_plugin tyk/coprocess/proto/*.proto +``` + +Run the following command to check the binding directory: + +```bash +ls Coprocess +``` + +The output will look like this: + +``` +CoprocessCommon.cs CoprocessObject.cs CoprocessReturnOverrides.cs +CoprocessMiniRequestObject.cs CoprocessObjectGrpc.cs CoprocessSessionState.cs +``` + +##### Implement Server + +Create a file called `Server.cs`. 
+ +Add the following code to `Server.cs`. + +```c# +using System; +using System.Threading.Tasks; +using Grpc.Core; + +using Coprocess; + +class DispatcherImpl : Dispatcher.DispatcherBase +{ + public DispatcherImpl() + { + Console.WriteLine("Instantiating DispatcherImpl"); + } + + + // The Dispatch method will be called by Tyk for every configured hook, we'll implement a very simple dispatcher here: + public override Task Dispatch(Coprocess.Object thisObject, ServerCallContext context) + { + // thisObject is the request object: + Console.WriteLine("Receiving object: " + thisObject.ToString()); + + // hook contains the hook name, this will be defined in our plugin bundle and the implementation will be a method in this class (DispatcherImpl), we'll look it up: + var hook = this.GetType().GetMethod(thisObject.HookName); + + // If hook is null then a handler method for this hook isn't implemented, we'll log this anyway: + if (hook == null) + { + Console.WriteLine("Hook name: " + thisObject.HookName + " (not implemented!)"); + // We return the unmodified request object, so that Tyk can proxy this in the normal way. 
+ return Task.FromResult(thisObject); + }; + + // If there's a handler method, let's log it and proceed with our dispatch work: + Console.WriteLine("Hook name: " + thisObject.HookName + " (implemented)"); + + // This will dynamically invoke our hook method, and cast the returned object to the required Protocol Buffers data structure: + var output = hook.Invoke(this, new object[] { thisObject, context }); + return (Task)output; + } + + // MyPreMiddleware implements a PRE hook, it will be called before the request is proxied upstream and before the authentication step: + public Task MyPreMiddleware(Coprocess.Object thisObject, ServerCallContext context) + { + Console.WriteLine("Calling MyPreMiddleware."); + // We'll inject a header in this request: + thisObject.Request.SetHeaders["my-header"] = "my-value"; + return Task.FromResult(thisObject); + } + + // MyAuthCheck implements a custom authentication mechanism, it will initialize a session object if the token matches a certain value: + public Task MyAuthCheck(Coprocess.Object thisObject, ServerCallContext context) + { + // Request.Headers contains all the request headers, we retrieve the authorization token: + var token = thisObject.Request.Headers["Authorization"]; + Console.WriteLine("Calling MyAuthCheck with token = " + token); + + // We initialize a session object if the token matches "abc123": + if (token == "abc123") + { + Console.WriteLine("Successful auth!"); + var session = new Coprocess.SessionState(); + session.Rate = 1000; + session.Per = 10; + session.QuotaMax = 60; + session.QuotaRenews = 1479033599; + session.QuotaRemaining = 0; + session.QuotaRenewalRate = 120; + session.Expires = 1479033599; + + session.LastUpdated = 1478033599.ToString(); + + thisObject.Metadata["token"] = token; + thisObject.Session = session; + return Task.FromResult(thisObject); + + } + + // If the token isn't "abc123", we return the request object in the original state, without a session object, Tyk will reject this request: + 
Console.WriteLine("Rejecting auth!"); + return Task.FromResult(thisObject); + } +} +``` + +Create a file called `Program.cs` to instantiate our dispatcher implementation and start a gRPC server. + +Add the following code to `Program.cs`. + +```c# +using System; +using Grpc.Core; + +namespace tyk_plugin +{ + class Program + { + + // Port to attach the gRPC server to: + const int Port = 5555; + + static void Main(string[] args) + { + // We initialize a Grpc.Core.Server and attach our dispatcher implementation to it: + Server server = new Server + { + Services = { Coprocess.Dispatcher.BindService(new DispatcherImpl()) }, + Ports = { new ServerPort("localhost", Port, ServerCredentials.Insecure) } + }; + server.Start(); + + Console.WriteLine("gRPC server listening on " + Port); + Console.WriteLine("Press any key to stop the server..."); + Console.ReadKey(); + + server.ShutdownAsync().Wait(); + + } + } +} +``` + +To run the gRPC server use the following command from the plugin directory: + +```bash +dotnet run +``` + +The gRPC server will listen on port 5555 (as defined in `Program.cs`). In the next steps we'll setup the plugin bundle and modify Tyk to connect to our gRPC server. + +#### Bundle the Plugin + +We need to create a manifest file within the `tyk-plugin` directory. This file contains information about our plugin and how we expect it to interact with the API that will load it. This file should be named `manifest.json` and needs to contain the following: + +```json +{ + "custom_middleware": { + "driver": "grpc", + "auth_check": { + "name": "MyAuthMiddleware", + "path": "", + "raw_body_only": false, + "require_session": false + } + } +} +``` + +- The `custom_middleware` block contains the middleware settings like the plugin driver we want to use (`driver`) and the hooks that our plugin will expose. We use the `auth_check` hook for this tutorial. For other hooks see [here](/api-management/plugins/rich-plugins#coprocess-dispatcher---hooks). 
+- The `name` field references the name of the function that we implement in our plugin code - `MyAuthMiddleware`. This will be handled by our dispatcher gRPC method (implemented in `Server.cs`). +- The `path` field is the path to the middleware component. +- The `raw_body_only` field +- The `require_session` field, if set to `true` gives you access to the session object. It will be supplied as a session variable to your middleware processor function + + +To bundle our plugin run the following command in the `tyk-plugin` directory. Check your tyk-cli install path first: + +```bash +/opt/tyk-gateway/utils/tyk-cli bundle build -y +``` + +From Tyk v2.8 upwards you can use: +```bash +/opt/tyk-gateway/bin/tyk bundle build -y +``` + +A plugin bundle is a packaged version of the plugin. It may also contain a cryptographic signature of its contents. The `-y` flag tells the Tyk CLI tool to skip the signing process in order to simplify the flow of this tutorial. + +For more information on the Tyk CLI tool, see [here](/api-management/plugins/overview#plugin-bundles). + +You should now have a `bundle.zip` file in the `tyk-plugin` directory. + +#### Publish the Plugin + +To publish the plugin, copy or upload `bundle.zip` to a local web server like Nginx, or Apache or storage like Amazon S3. For this tutorial we'll assume you have a web server listening on `localhost` and accessible through `http://localhost`. + + + +#### What's Next? + +In this tutorial we learned how Tyk gRPC plugins work. For a production-level setup we suggest the following: + +- Configure an appropriate web server and path to serve your plugin bundles. +- See the following [GitHub repo](https://github.com/TykTechnologies/tyk-plugin-demo-dotnet) for a gRPC based .NET plugin that incorporates authentication based on Microsoft SQL Server. 
+ +[1]: /tyk-self-managed/install +[2]: https://github.com/TykTechnologies/tyk-cli +[3]: /img/dashboard/system-management/plugin_options.png +[4]: /img/dashboard/system-management/plugin_auth_mode.png + +### Create Custom Authentication Plugin with NodeJS + +This tutorial will guide you through the creation of a custom authentication plugin for Tyk with a gRPC based plugin written in NodeJS. For additional information about gRPC, check the official documentation [here](https://grpc.io/docs/guides/index.html). + +The sample code that we'll use implements a very simple authentication layer using NodeJS and the proper gRPC bindings generated from our Protocol Buffers definition files. + +gRPC Auth Diagram + +#### Requirements + +- Tyk Gateway: This can be installed using standard package management tools like Yum or APT, or from source code. See [here](/tyk-self-managed/install) for more installation options. +- The Tyk CLI utility, which is bundled with our RPM and DEB packages, and can be installed separately from [https://github.com/TykTechnologies/tyk-cli](https://github.com/TykTechnologies/tyk-cli) +- In Tyk 2.8 and upwards the Tyk CLI is part of the gateway binary, you can find more information by running "tyk help bundle". +- NodeJS v6.x.x [https://nodejs.org/en/download/](https://nodejs.org/en/download/) + +#### Create the Plugin + +##### Create NodeJS Project + +We will use the NPM tool to initialize our project, follow the steps provided by the `init` command: + +```bash +cd ~ +mkdir tyk-plugin +cd tyk-plugin +npm init +``` + +Now we'll add the gRPC package for this project: + +```bash +npm install --save grpc +``` + +##### Install gRPC Tools + +Typically to use gRPC and Protocol Buffers you need to use a code generator and generate bindings for the target language that you're using. For this tutorial we'll skip this step and use the dynamic loader that's provided by the NodeJS gRPC library. 
This mechanism allows a program to load Protocol Buffers definitions directly from `.proto` files. See [this section](https://grpc.io/docs/tutorials/basic/node.html#loading-service-descriptors-from-proto-files) in the gRPC documentation for more details. + +To fetch the required `.proto` files, you may use an official repository where we keep the Tyk Protocol Buffers definition files: + +```bash +cd ~/tyk-plugin +git clone https://github.com/TykTechnologies/tyk +``` + +##### Implement Server + +Now we're ready to implement our gRPC server, create a file called `main.js` in the project's directory + +Add the following code to `main.js`. + +```nodejs +const grpc = require('grpc'), + resolve = require('path').resolve + +const tyk = grpc.load({ + file: 'coprocess_object.proto', + root: resolve(__dirname, 'tyk/coprocess/proto') +}).coprocess + +const listenAddr = '127.0.0.1:5555', + authHeader = 'Authorization', + validToken = '71f6ac3385ce284152a64208521c592b' + +// The dispatch function is called for every hook: +const dispatch = (call, callback) => { + var obj = call.request + // We dispatch the request based on the hook name, we pass obj.request which is the coprocess.Object: + switch (obj.hook_name) { + case 'MyPreMiddleware': + preMiddleware(obj, callback) + break + case 'MyAuthMiddleware': + authMiddleware(obj, callback) + break + default: + callback(null, obj) + break + } +} + +const preMiddleware = (obj, callback) => { + var req = obj.request + + // req is the coprocess.MiniRequestObject, we inject a header using the "set_headers" field: + req.set_headers = { + 'mycustomheader': 'mycustomvalue' + } + + // Use this callback to finish the operation, sending back the modified object: + callback(null, obj) +} + +const authMiddleware = (obj, callback) => { + var req = obj.request + + // We take the value from the "Authorization" header: + var token = req.headers[authHeader] + + // The token should be attached to the object metadata, this is used internally for key 
management: + obj.metadata = { + token: token + } + + // If the request token doesn't match the "validToken" constant we return the call: + if (token != validToken) { + callback(null, obj) + return + } + + // At this point the token is valid and a session state object is initialized and attached to the coprocess.Object: + var session = new tyk.SessionState() + session.id_extractor_deadline = Date.now() + 100000000000 + obj.session = session + callback(null, obj) +} + +main = function() { + server = new grpc.Server() + server.addService(tyk.Dispatcher.service, { + dispatch: dispatch + }) + server.bind(listenAddr, grpc.ServerCredentials.createInsecure()) + server.start() +} + +main() +``` + + +To run the gRPC server run: + +```bash +node main.js +``` + +The gRPC server will listen on port `5555` (see the `listenAddr` constant). In the next steps we'll setup the plugin bundle and modify Tyk to connect to our gRPC server. + + +#### Bundle the Plugin + +We need to create a manifest file within the `tyk-plugin` directory. This file contains information about our plugin and how we expect it to interact with the API that will load it. This file should be named `manifest.json` and needs to contain the following: + +```json +{ + "custom_middleware": { + "driver": "grpc", + "auth_check": { + "name": "MyAuthMiddleware", + "path": "", + "raw_body_only": false, + "require_session": false + } + } +} +``` + +- The `custom_middleware` block contains the middleware settings like the plugin driver we want to use (`driver`) and the hooks that our plugin will expose. We use the `auth_check` hook for this tutorial. For other hooks see [here](/api-management/plugins/rich-plugins#coprocess-dispatcher---hooks). +- The `name` field references the name of the function that we implement in our plugin code - `MyAuthMiddleware`. The implemented dispatcher uses a switch statement to handle this hook, and calls the `authMiddleware` function in `main.js`. 
+- The `path` field is the path to the middleware component. +- The `raw_body_only` field +- The `require_session` field, if set to `true` gives you access to the session object. It will be supplied as a session variable to your middleware processor function + +To bundle our plugin run the following command in the `tyk-plugin` directory. Check your tyk-cli install path first: + +```bash +/opt/tyk-gateway/utils/tyk-cli bundle build -y +``` + +For Tyk 2.8 use: +```bash +/opt/tyk-gateway/bin/tyk bundle build -y +``` + +A plugin bundle is a packaged version of the plugin. It may also contain a cryptographic signature of its contents. The `-y` flag tells the Tyk CLI tool to skip the signing process in order to simplify the flow of this tutorial. + +For more information on the Tyk CLI tool, see [here](/api-management/plugins/overview#plugin-bundles). + +You should now have a `bundle.zip` file in the `tyk-plugin` directory. + +#### Publish the Plugin + +To publish the plugin, copy or upload `bundle.zip` to a local web server like Nginx, Apache or storage like Amazon S3. For this tutorial we'll assume you have a web server listening on `localhost` and accessible through `http://localhost`. + + + + +#### What's Next? + +In this tutorial we learned how Tyk gRPC plugins work. For a production-level setup we suggest the following: + +- Configure an appropriate web server and path to serve your plugin bundles. + +[1]: /tyk-self-managed/install +[2]: https://github.com/TykTechnologies/tyk-cli +[3]: /img/dashboard/system-management/plugin_options.png +[4]: /img/dashboard/system-management/plugin_auth_mode.png + +### Create Custom Authentication Plugin With Python + +In the realm of API security, HMAC-signed authentication serves as a foundational concept. In this developer-focused blog post, we'll use HMAC-signed authentication as the basis for learning how to write gRPC custom authentication plugins with Tyk Gateway. Why learn how to write Custom Authentication Plugins? 
+ +- **Foundational knowledge**: Writing custom authentication plugins provides foundational knowledge of Tyk's extensibility and customization capabilities. +- **Practical experience**: Gain hands-on experience in implementing custom authentication logic tailored to specific use cases, starting with HMAC-signed authentication. +- **Enhanced control**: Exercise greater control over authentication flows and response handling, empowering developers to implement advanced authentication mechanisms beyond built-in features. + +While Tyk Gateway offers built-in support for HMAC-signed authentication, this tutorial serves as a practical guide for developers looking to extend Tyk's capabilities through custom authentication plugins. It extends the gRPC server that we developed in our [getting started guide](/api-management/plugins/rich-plugins#using-python). + +We will develop a basic gRPC server that implements the Tyk Dispatcher service with a custom authentication plugin to handle authentication keys, signed using the HMAC SHA512 algorithm. Subsequently, you will be able to make a request to your API with a HMAC signed authentication key in the *Authorization* header. Tyk Gateway will intercept the request and forward it to your Python gRPC server for HMAC signature and token verification. + +Our plugin will only verify the key against an expected value. In a production environment it will be necessary to verify the key against Redis storage. + +Before we continue ensure that you have: + +- Read and completed our getting started guide that explains how to implement a basic Python gRPC server to echo the request payload received from Tyk Gateway. This tutorial extends the source code of the tyk_async_server.py file to implement a custom authentication plugin for a HMAC signed authentication key. +- Read our HMAC signatures documentation for an explanation of HMAC signed authentication with Tyk Gateway. 
A brief summary is given in the HMAC Signed Authentication section below. + + +#### HMAC Signed Authentication + +Before diving in further, we will give a brief overview of HMAC signed authentication using our custom authentication plugin. + +- **Client request**: The journey begins with a client requesting access to a protected resource on the Tyk API. +- **HMAC signing**: Before dispatching the request, the client computes an HMAC signature using a secret key and request date, ensuring the payload's integrity. +- **Authorization header**: The HMAC signature, along with essential metadata such as the API key and HMAC algorithm, is embedded within the Authorization header. +- **Tyk Gateway verification**: Upon receipt, Tyk Gateway forwards the request to our gRPC server to execute the custom authentication plugin. This will validate the HMAC signature, ensuring the request's authenticity before proceeding with further processing. + +Requests should be made to an API that uses our custom authentication plugin as follows. A HMAC signed key should be included in the *Authorization* header and a date/time string in the *Date* header. An example request is shown in the curl command below: + +```bash +curl -v -H 'Date: Fri, 03 May 2024 12:00:42 GMT' \ +-H 'Authorization: Signature keyId="eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImdycGNfaG1hY19rZXkiLCJoIjoibXVybXVyNjQifQ==", \ +algorithm="hmac-sha512",signature="9kwBK%2FyrjbSHJDI7INAhBmhHLTHRDkIe2uRWHEP8bgQFQvfXRksm6t2MHeLUyk9oosWDZyC17AbGeP8EFqrp%2BA%3D%3D"' \ +http://localhost:8080/grpc-custom-auth/get +``` + +From the above example, it should be noted that: + +- The *Date* header contains a date string formatted as follows: *Fri, 03 May 2024 11:06:00 GMT*. +- The *Authorization* header is formatted as `Signature keyId="", algorithm="", signature=""` where: + + - **keyId** is a Tyk authentication key. + - **algorithm** is the HMAC algorithm used to sign the signature, *hmac-sha512* or *hmac-sha256*. 
+   - **signature** is the HMAC signature calculated with the date string from the *Date* header, signed with a base64 encoded secret value, using the specified HMAC algorithm. The HMAC signature is then encoded as base64.
+
+#### Prerequisites
+
+Firstly, we need to create the following:
+
+- An API configured to use a custom authentication plugin.
+- A HMAC enabled key with a configured secret for signing.
+
+This will enable us to issue a request to test that Tyk Gateway integrates with our custom authentication plugin on the gRPC server.
+
+###### Create API
+
+We will create an API served by Tyk Gateway, that will forward requests upstream to https://httpbin.org/.
+
+The API will have the following parameters configured:
+
+- **Listen path**: Tyk Gateway will listen to API requests on */grpc-custom-auth/* and will strip the listen path for upstream requests.
+- **Target URL**: The target URL will be configured to send requests to *http://httpbin/*.
+- **Authentication Mode**: The authentication mode will be configured for custom authentication. This is used to trigger CoProcess (gRPC), Python or JSVM plugins to handle custom authentication.
+
+You can use the following Tyk Classic API definition to get you started, replacing the *org_id* with the ID of your organization. 
+ +```json +{ + "api_definition": { + "id": "662facb2f03e750001a03500", + "api_id": "6c56dd4d3ad942a94474df6097df67ed", + "org_id": "5e9d9544a1dcd60001d0ed20", + "name": "Python gRPC Custom Auth", + "enable_coprocess_auth": true, + "auth": { + "auth_header_name": "Authorization" + }, + "proxy": { + "preserve_host_header": false, + "listen_path": "/grpc-custom-auth/", + "disable_strip_slash": true, + "strip_listen_path": true, + "target_url": "http://httpbin/" + }, + "version_data": { + "not_versioned": false, + "versions": { + "Default": { + "name": "Default", + "expires": "", + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [] + } + } + }, + "default_version": "Default" + }, + "active": true + } +} +``` + +The Tyk API definition above can be imported via Tyk Dashboard. Alternatively, if using Tyk Gateway OSS, a POST request can be made to the *api/apis* endpoint of Tyk Gateway. Consult the [Tyk Gateway Open API Specification documentation](/tyk-gateway-api) for usage. + +An illustrative example using *curl* is given below. Please note that you will need to: + +- Update the location to use the protocol scheme, host and port suitable for your environment. +- Replace the value in the *x-tyk-authorization* header with the secret value in your *tyk.conf* file. +- Replace the *org_id* with the ID of your organization. 
+ +```bash +curl -v \ + --header 'Content-Type: application/json' \ + --header 'x-tyk-authorization: your Gateway admin secret' \ + --location http://localhost:8080/tyk/apis/ \ + --data '{\ + "api_definition": {\ + "id": "662facb2f03e750001a03502",\ + "api_id": "6c56dd4d3ad942a94474df6097df67ef",\ + "org_id": "5e9d9544a1dcd60001d0ed20",\ + "name": "Python gRPC Custom Auth",\ + "enable_coprocess_auth": true,\ + "auth": {\ + "auth_header_name": "Authorization"\ + },\ + "proxy": {\ + "preserve_host_header": false,\ + "listen_path": "/grpc-custom-auth-error/",\ + "disable_strip_slash": true,\ + "strip_listen_path": true,\ + "target_url": "http://httpbin/"\ + },\ + "version_data": {\ + "not_versioned": false,\ + "versions": {\ + "Default": {\ + "name": "Default",\ + "expires": "",\ + "use_extended_paths": true,\ + "extended_paths": {\ + "ignored": [],\ + "white_list": [],\ + "black_list": []\ + }\ + }\ + },\ + "default_version": "Default"\ + },\ + "active": true\ + }\ + }' +``` + +A response similar to that given below will be returned by Tyk Gateway: + +```bash +{ + "key": "f97b748fde734b099001ca15f0346dfe", + "status": "ok", + "action": "added" +} +``` + +###### Create HMAC Key + +We will create an key configured to use HMAC signing, with a secret of *secret*. The key will configured to have access to our test API. + +You can use the following configuration below, replacing the value of the *org_id* with the ID of your organization. 
+
+```json
+{
+  "quota_max": 1000,
+  "quota_renews": 1596929526,
+  "quota_remaining": 1000,
+  "quota_reset": 1596843126,
+  "quota_used": 0,
+  "org_id": "5e9d9544a1dcd60001d0ed20",
+  "access_rights": {
+    "662facb2f03e750001a03500": {
+      "api_id": "662facb2f03e750001a03500",
+      "api_name": "Python gRPC Custom Auth",
+      "versions": ["Default"],
+      "allowed_urls": [],
+      "limit": null,
+      "quota_max": 1000,
+      "quota_renews": 1596929526,
+      "quota_remaining": 1000,
+      "quota_reset": 1596843126,
+      "quota_used": 0,
+      "per": 1,
+      "expires": -1
+    }
+  },
+  "enable_detailed_recording": true,
+  "hmac_enabled": true,
+  "hmac_string": "secret",
+  "meta_data": {}
+}
+```
+
+You can use Tyk Gateway’s API to create the key by issuing a POST request to the *tyk/keys* endpoint. Consult the [Tyk Gateway Open API Specification documentation](/tyk-gateway-api) for usage.
+
+An illustrative example using *curl* is given below. Please note that you will need to:
+
+- Update the location to use the protocol scheme, host and port suitable for your environment.
+- Replace the value in the *x-tyk-authorization* header with the secret value in your *tyk.conf* file.
+- Replace the *org_id* with the ID of your organization. 
+
+```bash
+curl --location 'http://localhost:8080/tyk/keys/grpc_hmac_key' \
+--header 'x-tyk-authorization: your Gateway admin secret' \
+--header 'Content-Type: application/json' \
+--data '{\
+  "alias": "grpc_hmac_key",\
+  "quota_max": 1000,\
+  "quota_renews": 1596929526,\
+  "quota_remaining": 1000,\
+  "quota_reset": 1596843126,\
+  "quota_used": 0,\
+  "org_id": "5e9d9544a1dcd60001d0ed20",\
+  "access_rights": {\
+    "662facb2f03e750001a03500": {\
+      "api_id": "662facb2f03e750001a03500",\
+      "api_name": "python-grpc-custom-auth",\
+      "versions": ["Default"],\
+      "allowed_urls": [],\
+      "limit": null,\
+      "quota_max": 1000,\
+      "quota_renews": 1596929526,\
+      "quota_remaining": 1000,\
+      "quota_reset": 1596843126,\
+      "quota_used": 0,\
+      "per": 1,\
+      "expires": -1\
+    }\
+  },\
+  "enable_detailed_recording": true,\
+  "hmac_enabled": true,\
+  "hmac_string": "secret",\
+  "meta_data": {}\
+}\
+'
+```
+
+A response similar to that given below should be returned by Tyk Gateway:
+
+```json
+{
+  "key": "eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImdycGNfaG1hY19rZXkiLCJoIjoibXVybXVyNjQifQ==",
+  "status": "ok",
+  "action": "added",
+  "key_hash": "a72fcdc09caa86b5"
+}
+```
+
+
+
+Make a note of the key ID given in the response, since we will need this to test our API.
+
+
+
+#### Implement Plugin
+
+Our custom authentication plugin will perform the following tasks:
+
+- Extract the *Authorization* and *Date* headers from the request object.
+- Parse the *Authorization* header to extract the *keyId*, *algorithm* and *signature* attributes.
+- Compute the HMAC signature using the specific algorithm and date included in the header.
+- Verify that the computed HMAC signature matches the signature included in the *Authorization* header. A 401 error response will be returned if verification fails. Our plugin will only verify the key against an expected value. In a production environment it will be necessary to verify the key against Redis storage. 
+- Verify that the *keyId* matches an expected value (VALID_TOKEN). A 401 error response will be returned to Tyk Gateway if verification fails. +- If verification of the signature and key passes then update the session with HMAC enabled and set the HMAC secret. Furthermore, add the key to the *Object* metadata. + +Return the request *Object* containing the updated session back to Tyk Gateway. When developing custom authentication plugins it is the responsibility of the developer to update the session state with the token, in addition to setting the appropriate response status code and error message when authentication fails. + +##### Import Python Modules + +Ensure that the following Python modules are imported at the top of your *tyk_async_server.py* file: + +```python +import asyncio +import base64 +import hashlib +import hmac +import json +import re +import signal +import logging +import urllib.parse + +import grpc +from google.protobuf.json_format import MessageToJson +from grpc_reflection.v1alpha import reflection +import coprocess_object_pb2_grpc +import coprocess_object_pb2 +from coprocess_common_pb2 import HookType +from coprocess_session_state_pb2 import SessionState +``` + +##### Add Constants + +Add the following constants to the top of the *tyk_async_server.py* file, after the import statements: + +```bash +SECRET = "c2VjcmV0" +VALID_TOKEN = "eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImdycGNfaG1hY19rZXkiLCJoIjoibXVybXVyNjQifQ==" +``` + +- **SECRET** is a base64 representation of the secret used for HMAC signing. +- **VALID_TOKEN** is the key ID that we will authenticate against. + +The values listed above are designed to align with the examples provided in the *Prerequisites* section, particularly those related to HMAC key generation. If you've made adjustments to the HMAC secret or you've modified the key alias referred to in the endpoint path (for instance, *grpc_hmac_key*), you'll need to update these constants accordingly. 
+ +##### Extract headers + +Add the following function to your *tyk_async_server.py* file to extract a dictionary of the key value pairs from the *Authorization* header. We will use a regular expression to extract the key value pairs. + +```python +def parse_auth_header(auth_header: str) -> dict[str,str]: + pattern = r'(\w+)\s*=\s*"([^"]+)"' + + matches = re.findall(pattern, auth_header) + + parsed_data = dict(matches) + + return parsed_data +``` + +##### Compute HMAC Signature + +Add the following function to your *tyk_async_server.py* to compute the HMAC signature. + +```python +def generate_hmac_signature(algorithm: str, date_string: str, secret_key: str) -> str: + + if algorithm == "hmac-sha256": + hash_algorithm = hashlib.sha256 + elif algorithm == "hmac-sha512": + hash_algorithm = hashlib.sha512 + else: + raise ValueError("Unsupported hash algorithm") + + base_string = f"date: {date_string}" + + logging.info(f"generating signature from: {base_string}") + hmac_signature = hmac.new(secret_key.encode(), base_string.encode(), hash_algorithm) + + return base64.b64encode(hmac_signature.digest()).decode() +``` + +Our function accepts three parameters: + +- **algorithm** is the HMAC algorithm to use for signing. We will use HMAC SHA256 or HMAC SHA512 in our custom authentication plugin +- **date_string** is the date extracted from the date header in the request sent by Tyk Gateway. +- **secret_key** is the value of the secret used for signing. + +The function computes and returns the HMAC signature for a string formatted as *date: date_string*, where *date_string* corresponds to the value of the *date_string* parameter. The signature is computed using the secret value given in the *secret_key* parameter and the HMAC algorithm given in the *algorithm* parameter. A *ValueError* is raised if the hash algorithm is unrecognized. + +We use the following Python modules in our implementation: + +- hmac Python module to compute the HMAC signature. 
+- base64 Python module to encode the result. + +##### Verify HMAC Signature + +Add the following function to your *tyk_async_server.py* file to verify the HMAC signature provided by the client: + +```python +def verify_hmac_signature(algorithm: str, signature: str, source_string) -> bool: + + expected_signature = generate_hmac_signature(algorithm, source_string, SECRET) + received_signature = urllib.parse.unquote(signature) + + if expected_signature != received_signature: + error = f"Signatures did not match\nreceived: {received_signature}\nexpected: {expected_signature}" + logging.error(error) + else: + logging.info("Signatures matched!") + + return expected_signature == received_signature +``` + +Our function accepts three parameters: + +- **algorithm** is the HMAC algorithm to use for signing. We will use hmac-sha256 or hmac-sha512 in our custom authentication plugin. +- **signature** is the signature string extracted from the *Authorization* header. +- **source_string** is the date extracted from the date header in the request sent by Tyk Gateway. +- **secret_key** is the value of the secret used for signing. + +The function calls *generate_hmac_signature* to verify the signatures match. It returns true if the computed and client HMAC signatures match, otherwise false is returned. + +##### Set Error Response + +Add the following helper function to *tyk_async_server.py* to allow us to set the response status and error message if authentication fails. + +```python +def set_response_error(object: coprocess_object_pb2.Object, code: int, message: str) -> None: + object.request.return_overrides.response_code = code + object.request.return_overrides.response_error = message +``` + +Our function accepts the following three parameters: + +- **object** is an instance of the [Object](/api-management/plugins/rich-plugins#coprocess-object) message representing the payload sent by Tyk Gateway to the *Dispatcher* service in our gRPC server. 
For further details of the payload structure dispatched by Tyk Gateway to a gRPC server please consult our gRPC documentation. +- **code** is the HTTP status code to return in the response. +- **message** is the response message. + +The function modifies the *return_overrides* attribute of the request, updating the response status code and error message. The *return_overrides* attribute is an instance of a [ReturnOverrides](/api-management/plugins/rich-plugins#returnoverrides) message that can be used to override the response of a given HTTP request. When this attribute is modified the request is terminated and is not sent upstream. + +##### Authenticate + +Add the following to your *tyk_async_server.py* file to implement the main custom authentication function. This parses the headers to extract the signature and date from the request, in addition to verifying the HMAC signature and key: + +```python +def authenticate(object: coprocess_object_pb2.Object) -> coprocess_object_pb2.Object: + keys_to_check = ["keyId", "algorithm", "signature"] + + auth_header = object.request.headers.get("Authorization") + date_header = object.request.headers.get("Date") + + parse_dict = parse_auth_header(auth_header) + + if not all(key in parse_dict for key in keys_to_check) or not all([auth_header, date_header]): + set_response_error(object, 400, "Custom middleware: Bad request") + return object + + try: + signature_valid = verify_hmac_signature( + parse_dict["algorithm"], + parse_dict["signature"], + date_header + ) + except ValueError: + set_response_error(object, 400, "Bad HMAC request, unsupported algorithm") + return object + + if not signature_valid or parse_dict["keyId"] != VALID_TOKEN: + set_response_error(object, 401, "Custom middleware: Not authorized") + else: + new_session = SessionState() + new_session.hmac_enabled = True + new_session.hmac_secret = SECRET + + object.metadata["token"] = VALID_TOKEN + object.session.CopyFrom(new_session) + + return object +``` + +The 
*Object* payload received from the Gateway is updated and returned as a response from the *Dispatcher* service: + +- If authentication fails then we set the error message and status code for the response accordingly, using our *set_response_error* function. +- If authentication passes then we update the session attribute in the *Object* payload to indicate that HMAC verification was performed and provide the secret used for signing. We also add the verified key to the meta data of the request payload. + +Specifically, our function performs the following tasks: + +- Extracts the *Date* and *Authorization* headers from the request and verifies that the *Authorization* header is structured correctly, using our *parse_auth_header* function. We store the extracted *Authorization* header fields in the *parse_dict* dictionary. If the structure is invalid then a 400 bad request response is returned to Tyk Gateway, using our *set_response_error* function. +- We use our *verify_hmac_signature* function to compute and verify the HMAC signature. A 400 bad request error is returned to the Gateway if HMAC signature verification fails, due to an unrecognized HMAC algorithm. +- A 401 unauthorized error response is returned to the Gateway under the following conditions: + + - The client HMAC signature and the computed HMAC signature do not match. + - The extracted key ID does not match the expected key value in VALID_TOKEN. + +- If HMAC signature verification passed and the key included in the *Authorization* header is valid then we update the *SessionState* instance to indicate that HMAC signature verification is enabled, i.e. *hmac_enabled* is set to true. We also specify the HMAC secret used for signing in the *hmac_secret* field and include the valid token in the metadata dictionary. 
+
+##### Integrate Plugin
+
+Update the *Dispatch* method of the *PythonDispatcher* class in your *tyk_async_server.py* file so that our authenticate function is called when a request is made by Tyk Gateway to execute a custom authentication (*HookType.CustomKeyCheck*) plugin.
+
+```python
+class PythonDispatcher(coprocess_object_pb2_grpc.DispatcherServicer):
+    async def Dispatch(
+        self, object: coprocess_object_pb2.Object, context: grpc.aio.ServicerContext
+    ) -> coprocess_object_pb2.Object:
+
+        logging.info(f"STATE for {object.hook_name}\n{MessageToJson(object)}\n")
+
+        if object.hook_type == HookType.Pre:
+            logging.info(f"Pre plugin name: {object.hook_name}")
+            logging.info(f"Activated Pre Request plugin from API: {object.spec.get('APIID')}")
+
+        elif object.hook_type == HookType.CustomKeyCheck:
+            logging.info(f"CustomAuth plugin: {object.hook_name}")
+            logging.info(f"Activated CustomAuth plugin from API: {object.spec.get('APIID')}")
+
+            authenticate(object)
+
+        elif object.hook_type == HookType.PostKeyAuth:
+            logging.info(f"PostKeyAuth plugin name: {object.hook_name}")
+            logging.info(f"Activated PostKeyAuth plugin from API: {object.spec.get('APIID')}")
+
+        elif object.hook_type == HookType.Post:
+            logging.info(f"Post plugin name: {object.hook_name}")
+            logging.info(f"Activated Post plugin from API: {object.spec.get('APIID')}")
+
+        elif object.hook_type == HookType.Response:
+            logging.info(f"Response plugin name: {object.hook_name}")
+            logging.info(f"Activated Response plugin from API: {object.spec.get('APIID')}")
+            logging.info("--------\n")
+
+        return object
+```
+
+#### Test Plugin
+
+Create the following bash script, *hmac.sh*, to issue a test request to an API served by Tyk Gateway. The script computes a HMAC signature and constructs the *Authorization* and *Date* headers for a specified API. The *Authorization* header contains the HMAC signature and key for authentication. 
+ +Replace the following constant values with values suitable for your environment: + +- **KEY** represents the key ID for the HMAC signed key that you created at the beginning of this guide. +- **HMAC_SECRET** represents the base64 encoded value of the secret for your HMAC key that you created at the beginning of this guide. +- **BASE_URL** represents the base URL, containing the protocol scheme, host and port number that Tyk Gateway listens to for API requests. +- **ENDPOINT** represents the path of your API that uses HMAC signed authentication. + +```bash +#!/bin/bash + +BASE_URL=http://localhost:8080 +ENDPOINT=/grpc-custom-auth/get +HMAC_ALGORITHM=hmac-sha512 +HMAC_SECRET=c2VjcmV0 +KEY=eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImdycGNfaG1hY19rZXkiLCJoIjoibXVybXVyNjQifQ== +REQUEST_URL=${BASE_URL}${ENDPOINT} + + +function urlencode() { + echo -n "$1" | perl -MURI::Escape -ne 'print uri_escape($_)' | sed "s/%20/+/g" +} + +# Set date in expected format +date="$(LC_ALL=C date -u +"%a, %d %b %Y %H:%M:%S GMT")" + +# Generate the signature using hmac algorithm with hmac secret from created Tyk key and +# then base64 encoded +signature=$(echo -n "date: ${date}" | openssl sha512 -binary -hmac "${HMAC_SECRET}" | base64) + +# Ensure the signature is base64 encoded +url_encoded_signature=$(echo -n "${signature}" | perl -MURI::Escape -ne 'print uri_escape($_)' | sed "s/%20/+/g") + +# Output the date, encoded date, signature and the url encoded signature +echo "request: ${REQUEST_URL}" +echo "date: $date" +echo "signature: $signature" +echo "url_encoded_signature: $url_encoded_signature" + +# Make the curl request using headers +printf "\n\n----\n\nMaking request to http://localhost:8080/grpc-custom-auth/get\n\n" +set -x +curl -v -H "Date: ${date}" \ + -H "Authorization: Signature keyId=\"${KEY}\",algorithm=\"${HMAC_ALGORITHM}\",signature=\"${url_encoded_signature}\"" \ + ${REQUEST_URL} +``` + +After creating and saving the script, ensure that it is executable by 
issuing the following command: + +```bash +chmod +x hmac.sh +``` + +Issue a test request by running the script: + +```bash +./hmac.sh +``` + +Observe the output of your gRPC server. You should see the request payload appear in the console output for the server and your custom authentication plugin should have been triggered. An illustrative example is given below: + +```bash +2024-05-13 12:53:49 INFO:root:STATE for CustomHMACCheck +2024-05-13 12:53:49 { +2024-05-13 12:53:49 "hookType": "CustomKeyCheck", +2024-05-13 12:53:49 "hookName": "CustomHMACCheck", +2024-05-13 12:53:49 "request": { +2024-05-13 12:53:49 "headers": { +2024-05-13 12:53:49 "User-Agent": "curl/8.1.2", +2024-05-13 12:53:49 "Date": "Mon, 13 May 2024 11:53:49 GMT", +2024-05-13 12:53:49 "Host": "localhost:8080", +2024-05-13 12:53:49 "Authorization": "Signature keyId=\"eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImdycGNfaG1hY19rZXkiLCJoIjoibXVybXVyNjQifQ==\",algorithm=\"hmac-sha512\",signature=\"e9OiifnTDgi3PW2EGJWfeQXCuhuhi6bGLiGhUTFpjEfgdKmX%2FQOFrePAQ%2FAoSFGU%2FzpP%2FCabmQi4zQDPdRh%2FZg%3D%3D\"", +2024-05-13 12:53:49 "Accept": "*/*" +2024-05-13 12:53:49 }, +2024-05-13 12:53:49 "url": "/grpc-custom-auth/get", +2024-05-13 12:53:49 "returnOverrides": { +2024-05-13 12:53:49 "responseCode": -1 +2024-05-13 12:53:49 }, +2024-05-13 12:53:49 "method": "GET", +2024-05-13 12:53:49 "requestUri": "/grpc-custom-auth/get", +2024-05-13 12:53:49 "scheme": "http" +2024-05-13 12:53:49 }, +2024-05-13 12:53:49 "spec": { +2024-05-13 12:53:49 "bundle_hash": "d41d8cd98f00b204e9800998ecf8427e", +2024-05-13 12:53:49 "OrgID": "5e9d9544a1dcd60001d0ed20", +2024-05-13 12:53:49 "APIID": "6c56dd4d3ad942a94474df6097df67ed" +2024-05-13 12:53:49 } +2024-05-13 12:53:49 } +2024-05-13 12:53:49 +2024-05-13 12:53:49 INFO:root:CustomAuth plugin: CustomHMACCheck +2024-05-13 12:53:49 INFO:root:Activated CustomAuth plugin from API: 6c56dd4d3ad942a94474df6097df67ed +2024-05-13 12:53:49 INFO:root:generating signature from: date: Mon, 
13 May 2024 11:53:49 GMT +2024-05-13 12:53:49 INFO:root:Signatures matched! +2024-05-13 12:53:49 INFO:root:-------- +``` + +Try changing the SECRET and/or KEY constants with invalid values and observe the output of your gRPC server. You should notice that authentication fails. An illustrative example is given below: + +``` +2024-05-13 12:56:37 INFO:root:STATE for CustomHMACCheck +2024-05-13 12:56:37 { +2024-05-13 12:56:37 "hookType": "CustomKeyCheck", +2024-05-13 12:56:37 "hookName": "CustomHMACCheck", +2024-05-13 12:56:37 "request": { +2024-05-13 12:56:37 "headers": { +2024-05-13 12:56:37 "User-Agent": "curl/8.1.2", +2024-05-13 12:56:37 "Date": "Mon, 13 May 2024 11:56:37 GMT", +2024-05-13 12:56:37 "Host": "localhost:8080", +2024-05-13 12:56:37 "Authorization": "Signature keyId=\"eyJvcmciOiI1ZTlkOTU0NGExZGNkNjAwMDFkMGVkMjAiLCJpZCI6ImdycGNfaG1hY19rZXkiLCJoIjoibXVybXVyNjQifQ==\",algorithm=\"hmac-sha512\",signature=\"KXhkWOS01nbxuFfK7wEBggkydXlKJswxbukiplboJ2n%2BU6JiYOil%2Bx4OE4edWipg4EcG9T49nvY%2Fc9G0XFJcfg%3D%3D\"", +2024-05-13 12:56:37 "Accept": "*/*" +2024-05-13 12:56:37 }, +2024-05-13 12:56:37 "url": "/grpc-custom-auth/get", +2024-05-13 12:56:37 "returnOverrides": { +2024-05-13 12:56:37 "responseCode": -1 +2024-05-13 12:56:37 }, +2024-05-13 12:56:37 "method": "GET", +2024-05-13 12:56:37 "requestUri": "/grpc-custom-auth/get", +2024-05-13 12:56:37 "scheme": "http" +2024-05-13 12:56:37 }, +2024-05-13 12:56:37 "spec": { +2024-05-13 12:56:37 "bundle_hash": "d41d8cd98f00b204e9800998ecf8427e", +2024-05-13 12:56:37 "OrgID": "5e9d9544a1dcd60001d0ed20", +2024-05-13 12:56:37 "APIID": "6c56dd4d3ad942a94474df6097df67ed" +2024-05-13 12:56:37 } +2024-05-13 12:56:37 } +2024-05-13 12:56:37 +2024-05-13 12:56:37 INFO:root:CustomAuth plugin: CustomHMACCheck +2024-05-13 12:56:37 INFO:root:Activated CustomAuth plugin from API: 6c56dd4d3ad942a94474df6097df67ed +2024-05-13 12:56:37 INFO:root:generating signature from: date: Mon, 13 May 2024 11:56:37 GMT +2024-05-13 12:56:37 
ERROR:root:Signatures did not match +2024-05-13 12:56:37 received: KXhkWOS01nbxuFfK7wEBggkydXlKJswxbukiplboJ2n+U6JiYOil+x4OE4edWipg4EcG9T49nvY/c9G0XFJcfg== +2024-05-13 12:56:37 expected: zT17C2tgDCYBJCgFFN/mknf6XydPaV98a5gMPNUHYxZyYwYedIPIhyDRQsMF9GTVFe8khCB1FhfyhpmzrUR2Lw== +``` + +#### Summary + +In this guide, we've explained how to write a Python gRPC custom authentication plugin for Tyk Gateway, using HMAC-signed authentication as a practical example. Through clear instructions and code examples, we've provided developers with insights into the process of creating custom authentication logic tailored to their specific API authentication needs. + +While Tyk Gateway already supports HMAC-signed authentication out of the box, this guide goes beyond basic implementation by demonstrating how to extend its capabilities through custom plugins. By focusing on HMAC-signed authentication, developers have gained valuable experience in crafting custom authentication mechanisms that can be adapted and expanded to meet diverse authentication requirements. + +It's important to note that the authentication mechanism implemented in this guide solely verifies the HMAC signature's validity and does not include access control checks against specific API resources. Developers should enhance this implementation by integrating access control logic to ensure authenticated requests have appropriate access permissions. + +By mastering the techniques outlined in this guide, developers are better equipped to address complex authentication challenges and build robust API security architectures using Tyk Gateway's extensibility features. This guide serves as a foundation for further exploration and experimentation with custom authentication plugins, empowering developers to innovate and customize API authentication solutions according to their unique requirements. + +--- + +
+ +### Performance + +These are some benchmarks performed on gRPC plugins. + +gRPC plugins may use different transports, we've tested TCP and Unix Sockets. + +#### TCP + +TCP Response Times + +TCP Hit Rate + +#### Unix Socket + +Unix Socket Response Times + +Unix Socket Hit Rate + +--- + +## Using Lua + +### Overview + +#### Requirements + +Tyk uses [LuaJIT](http://luajit.org/). The main requirement is the LuaJIT shared library, you may find this as `libluajit-x` in most distros. + +For Ubuntu 14.04 you may use: + +`$ apt-get install libluajit-5.1-2 +$ apt-get install luarocks` + +The LuaJIT required modules are as follows: + +* [lua-cjson](https://github.com/mpx/lua-cjson): in case you have `luarocks`, run: `$ luarocks install lua-cjson` + +#### How to write LuaJIT Plugins + +We have a demo plugin hosted in the repo [tyk-plugin-demo-lua](https://github.com/TykTechnologies/tyk-plugin-demo-lua). The project implements a simple middleware for header injection, using a Pre hook (see [Tyk custom middleware hooks](/api-management/plugins/javascript#using-javascript-with-tyk)) and [mymiddleware.lua](https://github.com/TykTechnologies/tyk-plugin-demo-lua/blob/master/mymiddleware.lua). +#### Lua Performance +Lua support is currently in beta stage. We are planning performance optimizations for future releases. +#### Tyk Lua API Methods +Tyk Lua API methods aren’t currently supported. 
+ +### Lua Plugin Tutorial + +#### Settings in the API Definition + +To add a Lua plugin to your API, you must specify the bundle name using the `custom_middleware_bundle` field: + +```json +{ + "name": "Tyk Test API", + "api_id": "1", + "org_id": "default", + "definition": { + "location": "header", + "key": "version" + }, + "auth": { + "auth_header_name": "authorization" + }, + "use_keyless": true, + "version_data": { + "not_versioned": true, + "versions": { + "Default": { + "name": "Default", + "expires": "3000-01-02 15:04", + "use_extended_paths": true, + "extended_paths": { + "ignored": [], + "white_list": [], + "black_list": [] + } + } + } + }, + "proxy": { + "listen_path": "/quickstart/", + "target_url": "http://httpbin.org", + "strip_listen_path": true + }, + "custom_middleware_bundle": "test-bundle", +} +``` + +#### Global settings + +To enable Lua plugins you need to add the following block to `tyk.conf`: + +```json +"coprocess_options": { + "enable_coprocess": true, +}, +"enable_bundle_downloader": true, +"bundle_base_url": "http://my-bundle-server.com/bundles/", +"public_key_path": "/path/to/my/pubkey", +``` + +`enable_coprocess` enables the rich plugins feature. + +`enable_bundle_downloader` enables the bundle downloader. + +`bundle_base_url` is a base URL that will be used to download the bundle, in this example we have "test-bundle" specified in the API settings, Tyk will fetch the following URL: `http://my-bundle-server.com/bundles/test-bundle`. + +`public_key_path` sets a public key, this is used for verifying signed bundles, you may omit this if unsigned bundles are used. + +#### Running the Tyk Lua build + +To use Tyk with Lua support you will need to use an alternative binary, it is provided in the standard Tyk package but it has a different service name. 
+ +Firstly stop the standard Tyk version: + +```console +service tyk-gateway stop +``` + +and then start the Lua build: + +```console +service tyk-gateway-lua start +``` + + diff --git a/api-management/policies.mdx b/api-management/policies.mdx new file mode 100644 index 000000000..9bf5b7dec --- /dev/null +++ b/api-management/policies.mdx @@ -0,0 +1,1054 @@ +--- +title: "Security Policy and Access Keys" +description: "How to create and use policies and access keys in Tyk" +keywords: "Policies, Security, Security Policy, Access Key, API Key" +sidebarTitle: "Security Policy and Access Keys" +--- + +## Introduction + +In Tyk, a security policy acts as a template for access control and rate limiting. It can be applied to multiple access keys, OAuth clients, or JWT tokens, allowing you to manage API access at scale. + +Access keys, on the other hand, are the tokens that clients use to authenticate and access your APIs. These keys can either have their own individual settings or inherit settings from one or more security policies. + +By leveraging security policies and access keys together, you can: + +- Standardize access control across multiple users or applications. +- Easily update access rights for groups of users. +- Implement tiered access levels (e.g., basic, premium, enterprise). +- Manage and monitor API usage effectively. + +In the following sections, we'll explore how to create and manage security policies and access keys using both the Tyk Dashboard and API. + +## What is a Security Policy + +A Tyk security policy incorporates several security options that can be applied to an API key. It acts as a template that can override individual sections of an API key (or identity) in Tyk. For example, if you had 10,000 API keys issued, how would you ensure that all 10,000 users received an upgraded quota or access a new API that you have published? 
+ +Using policies provides a more scalable and manageable way to control access compared to configuring each key separately, especially when dealing with large numbers of keys. +You could manually modify all 10,000 keys, or you could apply a policy to each of those keys when you create them, and then just modify the policy once. + +**Policies can set:** + +* Access lists for API and versions +* Access lists for method and path (granular control) +* Rate limit for a user +* Quota for a user +* Add tags and metadata + +Each of these can also be overridden in isolation using the partitioning options. When partitioning a policy, only one segment of the policy will be applied to the key. So, for example, if you need to set quotas and rate limits on the user level, but want to manage access control across all of your users, a partitioned policy with only the ACL enabled would achieve this. + +## Relationship between Security Policy and Access Key + +A security policy acts as a template that defines access rights, rate limits, quotas and other security settings whereas an access key (API key) is issued to a client/user to authenticate and access APIs. + +When creating an access key, you can apply one or more security policies to it. This associates the policy settings with that specific key. The policy settings then override the individual settings on the key itself. + +This allows you to manage access controls and limits for groups of keys by just modifying the policy, rather than updating each key individually. You can apply multiple policies to a single key, allowing for flexible combinations of access rights. + +When a request comes in with an access key, Tyk will evaluate the associated policies to determine the permissions and limits for that key. + +In essence, security policies provide a reusable template of settings that can be easily applied to many access keys, simplifying management and allowing for centralized control of API access and usage limits. 
+ +## Policies Guide + +A Tyk policy looks just like the session object that is used when you create a new key: + +```{.copyWrapper} +{ + org_id: "53ac07777cbb8c2d53000002", + rate: 3, + per: 1, + quota_max: 1000, + quota_renewal_rate: 90000, + access_rights: { + b605a6f03cc14f8b74665452c263bf19: { + apiname: "Tyk Test API", + apiid: "b605a6f03cc14f8b74665452c263bf19", + versions: [ + "Default" + ], + allowed_urls: [] + }, + "3b7e73fd18794f146aab9c2e07b787bf": { + apiname: "Second Test API", + apiid: "3b7e73fd18794f146aab9c2e07b787bf", + versions: [ + "Test" + ], + allowed_urls: [] + } + }, + active: true, + is_inactive: false, + tags: [], + key_expires_in: 0 +} +``` + +Here you can see the various fields as they are applied to Tyk keys, these are all described in the Keys section of the [Gateway API](/tyk-gateway-api). + +The important differences here are two new additions: + +- The `active` flag must be set to `true` for Tyk to load the policy into memory, this makes it easy to enable or disable policies without deleting them. + +- Secondly, the `is_inactive` flag applies to the key itself. If you set this value to `true`, any key with this policy will be disabled, you can actually set this same value on a key object to make the single key inactive, but as part of a policy it makes it possible to deny access to a whole block of users with a single change. + +### Trial keys + +It is possible to have a policy create "Trial" keys, these are keys with a fixed expiry date set in the number of seconds from the time of the keys creations. + +Although key expiry can be set in the session object on creation, when a key is created using the portal or a key request it will have a default expiry time. + +To set a trial key expiry, simply add: + +```{.copyWrapper} +`key_expires_in: 50000` +``` + +To the policy object, when the key is generated, the expiry will be forced. 
+ +### Configuring Pro Edition to use a policy list + +Tyk Pro (The Dashboard) has policies enabled by default. + +### Configuring the Open Source Edition to use a policy list + +If your Tyk configuration is standalone and configuration is being managed via the Gateway API without the support of the dashboard, then you will need to set the `policies section` in your configuration file as follows: + +```{.copyWrapper} +"policies": { + "policy_source": "file", + "policy_record_name": "./policies/policies.json" +}, +``` + +Here the `policy_source` section is set to `file` and tells Tyk to look for policy record in the file specified in the `policy_record_name` field. An example file is shipped with Tyk, and it will look like this: + +```{.copyWrapper} +{ + "default": { + "rate": 1000, + "per": 1, + "quota_max": 100, + "quota_renewal_rate": 60, + "access_rights": { + "41433797848f41a558c1573d3e55a410": { + "api_name": "My API", + "api_id": "41433797848f41a558c1573d3e55a410", + "versions": [ + "Default" + ] + } + }, + "org_id": "54de205930c55e15bd000001", + "hmac_enabled": false + } +} +``` + +The record is a single JSON object, with each named key representing the policy ID, so you can list multiple policies within the single JSON object. In the above example we have only defined a single policy called `default`. + +### Applying a policy to a key + +To apply the above policy to a key, we simply need to call the `/create` (or `/add`) endpoint in the Tyk REST API with a session object that has the `apply_policy_id` flag set to the name `default` (or whatever you named your policy). + + + +Although `apply_policy_id` is still supported, it is now deprecated. `apply_policies` is now used to list your policy IDs as an array. This supports the **Multiple Policy** feature introduced in the **v2.4** release. 
+ + + + +```{.copyWrapper} +{ + "allowance": 2, + "rate": 3, + "per": 1, + "expires": 0, + "quota_max": 1000, + "quota_renews": 1429804261, + "quota_remaining": 1000, + "quota_renewal_rate": 90000, + "access_rights": {}, + "org_id": "53ac07777cbb8c2d53000002", + "EnableHTTPSignatureValidation": false, + "hmac_enabled": false, + "hmac_string": "", + "is_inactive": false, + "apply_policy_id": "default", + "apply_policies": [ + "59672779fa4387000129507d", + "53222349fa4387004324324e", + "543534s9fa4387004324324d" + ] +} +``` + +Although we have set the main factors of the key, they will be overridden by the policy as soon as the key is loaded, this will happen each time the key appears, so modifying a policy will have an instant effect on the token. + +### How You Can Create Policies + +[With the Dashboard API](/api-management/gateway-config-managing-classic#create-a-security-policy-with-the-api) + +[With the Gateway API - Open Source tab](/api-management/gateway-config-managing-classic#secure-an-api) + +[With the Dashboard](/api-management/gateway-config-managing-classic#create-a-security-policy-with-the-dashboard) + +--- +--- +--- + +### Secure your APIs by Method and Path + +Tyk already lets you set version access rights, allowed, and blocked paths to control how your users access your APIs, however what has not been easy to do is to restrict access based on specific paths, per key or policy. + +Granular path control allows you to define which methods and paths a key is allowed to access on a per API-version basis. This can be done on a key-by-key basis, or, for even more power and control, through the Policies feature. 
+ +With this feature it is possible to set up tiered access policies for your users, so if you offer read only, free and extended access to your APIs and are charging for the higher levels, you can encode these tiers into policies, and use the granular path control feature to limit what paths and methods the keys with those access policies can access. + +Or, alternatively, you could just upgrade a single key to have more access, both methods use the same, or similar areas of the configuration to make this possible. + + + +Granular permissions are applied *after* version-based (global) allowlist/blocklist rules. + + + + +#### Setting granular paths on a per-key basis + +Let's take a look at a key session definition: + +```{.copyWrapper} +{ + "last_check": 0, + "allowance": 2, + "rate": 3, + "per": 1, + "expires": -1, + "quota_max": 1000, + "quota_renews": 1429804261, + "quota_remaining": 994, + "quota_renewal_rate": 90000, + "access_rights": { + "3b7e73fd18794f146aab9c2e07b787bf": { + "api_name": "Second Test API", + "api_id": "3b7e73fd18794f146aab9c2e07b787bf", + "versions": [ + "Test" + ], + "allowed_urls": [] + }, + "b605a6f03cc14f8b74665452c263bf19": { + "api_name": "Tyk Test API", + "api_id": "b605a6f03cc14f8b74665452c263bf19", + "versions": [ + "Default" + ], + "allowed_urls": [] + } + }, + "org_id": "53ac07777cbb8c2d53000002", + "oauth_client_id": "", + "basic_auth_data": {}, + "hmac_enabled": false, + "hmac_string": "", + "is_inactive": false +} +``` + +Within the `access_rights` section, in each version definition, we can see an `allowed_urls` section, here we can define which URLs are enabled in this key as follows: + +```{.copyWrapper} + "allowed_urls": [ + { + "url": "/resource/(.*)", + "methods": ["GET", "POST"] + } + ] +``` + +Each entry must be a valid Regex pattern and use the [Go syntax](https://golang.org/pkg/regexp/syntax/) (unfortunately Tyk does not accept regular expressions written with the Perl syntax at this time). 
Methods are case-sensitive. This is an allow list, and can be used to define exactly what kind of access a key can have to your API.
+
+#### Using granular control with a key template
+
+This feature is much more powerful when applied to key templates and the policies feature, within the policy definition you can add the same section:
+
+```{.copyWrapper}
+{
+  "default": {
+    "rate": 1000,
+    "per": 1,
+    "quota_max": 100,
+    "quota_renewal_rate": 60,
+    "access_rights": {
+      "41433797848f41a558c1573d3e55a410": {
+        "api_name": "My API",
+        "api_id": "41433797848f41a558c1573d3e55a410",
+        "versions": [
+          "Default"
+        ],
+        "allowed_urls": [
+          {
+            "url": "/resource/(.*)",
+            "methods": ["GET", "POST"]
+          }
+        ]
+      }
+    },
+    "org_id": "54de205930c55e15bd000001",
+    "hmac_enabled": false
+  }
+}
+```
+
+These paths will be copied into the active key session the next time a key that is using this policy appears.
+
+## Partitioned Policies
+
+Creating a policy where access rights, usage quota and rate limit are set in stone may not suit your use case. Instead, you may wish to have only one or two segments of a token managed at policy level and the other segments managed at key level or by another policy.
+
+### Example Use Case
+
+You have different tiers of rate limiting as follows:
+
+* Tier A has access to the API at a rate of 1000 per 60 seconds
+* Tier B a rate of 500 per 60 seconds
+* Tier C a rate of 250 per 60 seconds
+
+You could create three separate policies that allow the same access rights and usage quota but have different rate limiting, or, you could create one policy and partition it by enforcing only access rights and usage quota, leaving rate limiting to be defined at key level or by another policy.
+
+Because the access rights and usage quota are enforced at policy level, you can only make changes to them within the policy. Any changes will then be inherited by all keys with that policy applied without affecting the rate limit defined at key level. 
+ +A partitioned policy can enforce any of these elements individually or together on a key: + +* The Access Control List (ACL), configured using the `access_rights` field + * When applying partitioned policies to a key, at least one of these policies needs to enforce ACL +* The Rate limit +* The Quota limit +* The GraphQL complexity (currently only query-depth limit is supported) + +### Set up a partition in an API + +You can partition your policy by adding a `partitions` section to your policy object: + +```{.json} +"partitions": { + "quota": false, + "rate_limit": false, + "acl": false, + "complexity": false +} +``` + +* `quota`: If set to `true`, enforce the quota element of this policy +* `rate_limit`: If set to `true`, enforce the rate limit of this policy +* `acl`: If set to `true`, enforce the access control rules of this policy +* `complexity`: If set to `true`, enforce the GraphQL complexity rules of this policy + +Partitions can be applied together, if you select all of them then essentially the whole policy will be enforced. + +### Set up a partition in the Tyk Dashboard + +Once you have added access rights to your policy, open the Global Limits and Quota panel. You’ll see the Policy Partitioning section where you can uncheck Access Rights, Usage Quota or Rate Limiting to enable their value to be defined at key level. + +For example, the screenshot below shows that rate limit has not been enforced and therefore can be defined at key level when this policy is applied to a key. + +Global Limits + +### Partitioned Policy Functionality + +In Gateway v2.4 and Dashboard v1.4 We extended support for partitioned policies, and you can now apply multiple when creating a key. We’ll cover all combinations and how you can expect the key to react. 
+ + + +#### Applying partitioned policies to a key with the same segments enforced + +If you apply partitioned policies to a key with the same segments enforced, you will be able to override any segment that has not been enforced and define new rules specific to that key. + +**Example One** - Single Policy: Policy A has access rights and usage quota enforced meaning the rate limiting can be defined at key level. + +**Example Two** - Multiple Policies: Policy A and Policy B have access rights and usage quota enforced meaning the rate limiting defined at key level will be inherited by both policies. + +```{.json} +{ + "policy_a": { + "access_rights": { + "1": { + "api_name": "API One", + "api_id": "1", + "versions": [ + "Default" + ] + } + }, + "active": true, + "id": "policy_a", + "name": "policy_a", + "partitions": { + "acl": true, + "complexity": false, + "per_api": false, + "quota": true, + "rate_limit": false + }, + "quota_max": 100, + "quota_renewal_rate": 3600, + "state": "active", + "tags": [] + }, + "policy_b": { + "access_rights": { + "2": { + "api_name": "API Two", + "api_id": "2", + "versions": [ + "Default" + ] + } + }, + "active": true, + "id": "policy_b", + "name": "policy_b", + "partitions": { + "acl": true, + "complexity": false, + "per_api": false, + "quota": true, + "rate_limit": false + }, + "quota_max": 50, + "quota_renewal_rate": 3600, + "state": "active", + "tags": [] + } +} +``` + +##### Use Case + +You want to give access to the same API with the same usage quota but define separate rate limits for various developers. + +#### Applying partitioned policies to a key with different segments enforced + +For ultimate flexibility, you can create policies that each have only one segment enforced. Instead of creating multiple policies that cover a variety of scenarios you can create a few as building blocks to create unique combinations that suit your needs. 
+ +**Example:** + +Policy A has API 1 enforced +Policy B has API 2 enforced +Policy C has a rate limit of 1000 per 60 seconds enforced +Policy D has a rate limit of 2000 per 60 seconds enforced +Policy E has an unlimited request usage quota enforced +Policy F has 10,000 requests per hour usage quota enforced + +If Policy A, C and E is applied to a key it will give access to API 1 at a rate of 1000 per 60 seconds with unlimited requests. + +If Policy A, D and E is applied to a key it will give access to API 1 at a rate of 2000 per 60 seconds with unlimited requests. + +```{.json} +{ + "policy_a": { + "access_rights": { + "1": { + "api_name": "API 1", + "api_id": "1", + "versions": [ + "Default" + ] + } + }, + "active": true, + "id": "policy_a", + "name": "policy_a", + "partitions": { + "acl": true, + "complexity": false, + "per_api": false, + "quota": false, + "rate_limit": false + }, + "state": "active", + "tags": [] + }, + "policy_b": { + "access_rights": { + "2": { + "api_name": "API 2", + "api_id": "2", + "versions": [ + "Default" + ] + } + }, + "active": true, + "id": "policy_b", + "name": "policy_b", + "partitions": { + "acl": true, + "complexity": false, + "per_api": false, + "quota": false, + "rate_limit": false + }, + "state": "active", + "tags": [] + }, + "policy_c": { + "access_rights": {}, + "active": true, + "id": "policy_c", + "name": "policy_c", + "partitions": { + "acl": false, + "complexity": false, + "per_api": false, + "quota": false, + "rate_limit": true + }, + "per": 60, + "rate": 1000, + "state": "active", + "tags": [], + "throttle_interval": -1, + "throttle_retry_limit": -1 + }, + "policy_d": { + "access_rights": {}, + "active": true, + "id": "policy_d", + "name": "policy_d", + "partitions": { + "acl": false, + "complexity": false, + "per_api": false, + "quota": false, + "rate_limit": true + }, + "per": 60, + "rate": 2000, + "state": "active", + "tags": [], + "throttle_interval": -1, + "throttle_retry_limit": -1 + }, + "policy_e": { + 
"access_rights": {}, + "active": true, + "id": "policy_e", + "name": "policy_e", + "partitions": { + "acl": false, + "complexity": false, + "per_api": false, + "quota": true, + "rate_limit": false + }, + "quota_max": -1, + "quota_renewal_rate": -1, + "state": "active", + "tags": [], + "throttle_interval": -1, + "throttle_retry_limit": -1 + }, + "policy_f": { + "access_rights": {}, + "active": true, + "id": "policy_f", + "name": "policy_f", + "partitions": { + "acl": false, + "complexity": false, + "per_api": false, + "quota": true, + "rate_limit": false + }, + "quota_max": 10000, + "quota_renewal_rate": 3600, + "state": "active", + "tags": [], + "throttle_interval": -1, + "throttle_retry_limit": -1 + } +} +``` + +##### Use Case + +You have 20 developer keys that use a combination of Policy A, B, C, D, E and F and have decided that you’d now like to alter Policy D’s rate limit to 3000 per 60 seconds. All keys with Policy D applied will now inherit the new value instantly. If you had created each of the keys without using policies you would have to find and edit each key manually. + +#### Applying both a partitioned policy and a non-partitioned policy to a key + +If you apply both a partitioned policy and a non-partitioned policy to the same key, any segments that have not been enforced in the partitioned policy will inherit the values in the non-partitioned policy. + +##### Example + +Policy A has enforced access to API 1 with a rate limit of 1000 per 60 seconds and unlimited requests for the usage quota. +Policy B only has enforced access to API 2 + +If both policies were applied to a key, Policy B would automatically inherit Policy A’s rate limit and usage quota because Policy B did not have rate limit or usage quota enforced. 
+ +```{.json} +{ + "policy_a": { + "access_rights": { + "1": { + "api_name": "API One", + "api_id": "1", + "versions": [ + "Default" + ] + } + }, + "active": true, + "partitions": { + "acl": true, + "complexity": false, + "per_api": false, + "quota": true, + "rate_limit": true + }, + "per": 60, + "quota_max": -1, + "quota_renewal_rate": -1, + "rate": 1000, + "state": "active", + "tags": [], + "throttle_interval": -1, + "throttle_retry_limit": -1 + }, + "policy_b": { + "access_rights": { + "2": { + "api_name": "API Two", + "api_id": "2", + "versions": [ + "Default" + ] + } + }, + "active": true, + "partitions": { + "acl": true, + "complexity": false, + "per_api": false, + "quota": false, + "rate_limit": false + }, + "state": "active", + "tags": [] + } +} +``` + +##### Use Case + +A developer already has a key that gives access to Policy A and now requires access to another API product. The developer is already paying for a specific rate and limit and just needs access to the additional API. Instead of editing Policy A to allow for the additional API access (which would then affect all keys with this policy applied), we can instead create Policy B and combine the two, allowing the additional API in Policy B to inherit the same rate and limit the developer requires. + + + +For v2.4 and 1.4 multiple policies are only supported only via the Add Key section and via the API. +Support oAuth, and Portal API Catalogs are planned for subsequent releases. +Support of multiple policies for JWT and OIDC is done through the API definition when using scopes. + + + +## Access Keys + +### Access Key Expiry + +Key Expiry allows you to set the lifetime of tokens, ensuring a regular re-cycling of API tokens. If a key has expired Tyk will no longer let requests through on a token, however this **does not mean** that Tyk will remove the key. + +#### Token Expiry Behavior and Time-To-Live + +If a key is expired, Tyk will return a warning that the token has expired to the end user. 
If a token has been deleted, then Tyk will return an access denied response to the client. This is an important difference. In some cases, API tokens are hard-coded (this is terrible practice, but it does happen far more often than you might think). In this case it is extremely expensive to replace the token if it has expired.
+
+In the above case, if a token had been deleted because the **Time To Live** of the token matched its expiry time, then the end user would need to replace the token with a new one. However, because we do not expire the key it is possible for an administrator to reset the expiry of the token to allow access and manage renewal in a more granular way.
+
+#### Timestamp format on a session object
+
+Tyk manages timestamps in the Unix timestamp format - this means that when a date is set for expiry it should be converted to a Unix timestamp (usually a large integer) which shows seconds since the epoch (Jan 1 1970). This format is used because it allows for faster processing and takes into account timezone differences without needing localisation.
+
+Key sessions are created and updated using the Tyk Gateway API, in order to set the expiry date for a key, update the `expires` value with a Unix timestamp of when the key should expire.
+
+
+ + + +`expires` can only be a positive number, or `0` if you don't want the key to expire. + + + + +#### How to delete expired tokens + +In order to not clutter the database with expired tokens, Tyk provides a way to force a TTL on all keys, this is a maximum time to live and should always be significantly larger than your maximum expiry setting. This setting must be set on a per-API basis. + +To enforce a TTL, set the `session_lifetime` value (in seconds) in your API Definition Object, this will need to be managed via the Dashboard REST API. + +### Access Key Hashing + +Tyk stores all API Tokens and their equivalent Session Objects in a Redis DB. Because of this, Tyk will, by default, obfuscate the tokens in Redis using a key hash. + +#### Default Key Hash Algorithm + +To find a balance between performance and security, the default algorithm used by Tyk to do the hashing is `murmur3`, and serves more to obfuscate than to cryptographically secure the tokens. + +It is possible to disable key hashing in Tyk using `hash_keys` set to `false` in your `tyk.conf` and `tyk_analytics.conf`. + +See the [Gateway Configuration Options](/tyk-oss-gateway/configuration) for more details. + +#### Custom Key Hash Algorithms + +To set a custom algorithm, you need to set `hash_key_function` in your `tyk.conf` to one of the following options: + +* `murmur32` +* `murmur64` +* `murmur128` +* `sha256` + +MurMur non-cryptographic hash functions are considered as the industry fastest and conflict-prone algorithms up to date, which gives a nice balance between security and performance. With this change you now you can choose the different hash length, depending on your organization security policies. We have also introduced a new `sha256` cryptographic key hashing algorithm, for cases when you are willing to sacrifice some performance for additional security. 
+
+Performance-wise, setting new key hashing algorithms can increase the key hash length, as well as key length itself, so expect your analytics data size to grow (but not that much, up to about 10%). Additionally, if you set the `sha256` algorithm, it will significantly slow down Tyk, because cryptographic functions are slow by design but very secure.
+
+Technically, it is implemented by new key generation algorithms, which now embed additional metadata to the key itself, and if you are curious about the actual implementation details, feel free to check the following [pull request](https://github.com/TykTechnologies/tyk/pull/1753).
+
+Changing the hashing algorithm is entirely backward compatible. All your existing keys will continue working with the old `murmur32` hashing algorithm, and your new keys will use the algorithm specified in your `tyk.conf`. Moreover, changing algorithms is also backward compatible, and Tyk will maintain keys with multiple hashing algorithms without any issues.
+
+A hashed installation imposes some constraints on how Tyk is used:
+
+* Listing tokens requires setting `enable_hashed_keys_listing` to `true` in your `tyk.conf` file
+* Tokens appear in Analytics in their hashed form
+
+
+
+
+ Switching from a hashed installation to non-hashed means all existing tokens cannot be used (they will not be correctly validated).
+
+
+
+
+#### Using Hashed Keys Endpoints
+
+- endpoints `POST /keys/create`, `POST /keys` and `POST /keys/{keyName}` also return the field `"key_hash"` for future use
+- endpoint `GET /keys` gets all (or per API) key hashes. You can disable this endpoint by using the new `tyk.conf` setting `enable_hashed_keys_listing` (set to `false` by default)
+- endpoint `GET /keys/{keyName}` was modified to be able to get a key by hash. You just need to provide the key hash as a `keyName`
+and call it with the new optional query parameter `hashed=true`. 
So the new format is `GET /keys/{keyName}?hashed=true"` +- we also have the same optional parameter for endpoint `DELETE /keys/{keyName}?hashed=true` and call it with the optional query parameter `hashed=true`. So the format is `GET /keys/{keyName}?hashed=true"` +- The same optional parameter is available for the `DELETE /keys/{keyName}?hashed=true` endpoint + +See the Keys section of [Tyk Gateway API Swagger page](/tyk-gateway-api) for more details. + +### Access Key Level Security + +Tyk supports the concept of access control at the key level. Access control is managed via three important settings in a session object. In order to be fully clear on how Tyk handles access control, it's worth looking at the key settings that go into a user session object. A full description of each of the options can be found in the [Tyk Gateway API documentation](/tyk-gateway-api). + +Tyk will store each access key as a record in your Redis database, and this key will have certain metadata attached to it. The record takes this form: + +```{.copyWrapper} +{ + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": -1, + "quota_max": -1, + "quota_renews": 1406121006, + "quota_remaining": 0, + "quota_renewal_rate": 60, + "access_rights": { + "APIID1": { + "api_name": "HMAC API", + "api_id": "APIID1", + "versions": [ + "Default" + ] + } + }, + "org_id": "1", + "hmac_enabled": false, + "hmac_string": "" +} +``` + +The important elements that manage access control are the following fields: + +* `allowance` & `rate`: these should be set to the same value, these are the users allowance during a period as set by `per`. +* `per`: The time in seconds where a rate limit is applied. +* `expires`: The date when the key expires. **Note:** `expires` can only be a positive number, or -1 (unlimited). +* `quota_max`: The usage quota for the user. **Note:** `quota_max` can only be a positive number, or -1 (unlimited). +* `quota_renews`: the Unix timestamp when the quota is renewed. 
+* `quota_renewal_rate`: The time, in seconds, of when the quota gets renewed (e.g. 86400 would represent 1 day).
+
+These settings can be used exclusively or in conjunction with one another to create usage patterns and tiers of access for your users. Each time a request is processed by Tyk, the session will be updated with an updated quota (if used), and updated throttling rate depending on the time-frame specified.
+
+Creating new keys is done by POSTing an object such as the above to the Tyk create key API endpoint. See the keys section of the [Tyk Gateway API OpenAPI/Swagger](/tyk-gateway-api) page.
+
+The three types of access control are:
+
+#### Rate limiting
+
+Also known as throttling, the API will actively only allow a key to make x requests per y time period. This is very useful if you want to ensure your API does not get flooded with requests.
+
+In order to apply a rate limit:
+
+1. Ensure that `allowance` and `rate` are set to the same value, this should be the number of requests to be allowed in a time period, so if you wanted 100 requests every second, set this value to 100.
+2. Ensure that `per` is set to the time limit. Again, as in the above example, if you wanted 100 requests per second, set this value to 1. If you wanted 100 per 5 seconds, set this value to 5 etc.
+
+#### Quotas
+
+A quota is similar to a rate limit, as it allows a certain number of requests through in a time period. However, traditionally these periods are much longer, so for example if you would like to limit a user to only 10,000 requests to the API per month, you can create a key that has no rate limiting but will disallow access once the quota is empty. Tyk will automatically reset the quota if the time limit on reset has been exceeded.
+
+In order to set a quota for a user:
+
+1. Ensure that `quota_max` is set to the maximum amount of requests that a user is allowed to make in a time period.
+2. 
Ensure `quota_remaining` is set to the same value as `quota_max`; this is the value that will decrement on each request (failed or successful).
+3. Set the `quota_renewal_rate` to the value, in seconds, of when the quota should renew. For example, if you would like it to renew every 30 days, you would have `2592000` seconds (`((60*60) * 24) * 30 = 2592000`).
+
+To set an unlimited quota, set `quota_max` to `-1`.
+
+
+
+`quota_max` can only be a positive number, or -1 (unlimited).
+
+
+
+#### Key Expiry
+
+If you set a date in the key expiry field, when the key is created (or updated), the expiry time is also set as the key's deletion time from Redis. If a key has expired, Tyk will no longer let requests through on this key.
+
+Tyk manages timestamps in the Unix timestamp format - this means that when a date is set for expiry it should be converted to a Unix timestamp (usually a large integer) which shows seconds since the epoch (Jan 1 1970). This format is used because it allows for faster processing and takes into account timezone differences without needing localisation.
+
+Key sessions are created and updated using the Tyk REST API; in order to set the expiry date for a key, update the `expires` value with the timestamp of when the key should expire.
+
+Leave this field empty for it never to expire.
+
+## Understanding Tyk Session
+
+### What is a Session Object
+
+In Tyk, all identities are mapped to a session object. Identities can be in the form of Bearer Tokens, HMAC Keys, JSON Web Tokens, OpenID Connect identities and Basic Auth users.
+
+You should think about a session object as the metadata associated with a user, or the identity trying to gain access to your services. 
+ +In Tyk, a session object encapsulates the following details for any given identity: + +* What rate limit to apply +* What quota to apply +* What Access Control List to apply +* What policy ID to use to override the above (if set) +* When the session holder's access expires + +Tyk also allows some additional metadata for a session object which is valuable for transformation or upstream identification purposes: + +* Metadata (a string key/value map that can hold any data) +* Alias (a human-readable name for the identity) + + + + + Expiry is not the same as invalidation, in Tyk, a session object will be "expired" but will still be in the database in order to inform the session owner that their token has expired and they should renew, if the token was invalidated (deleted after the expiry period), then the user would simply be denied access and their token would be invalid. This is important for developers that have (but shouldn't) hard-coded their token into their app so it is hard to change. + + + + +#### Where are session objects stored? + +Session objects are stored in Redis, not in MongoDB or in the Gateway itself. Session objects are stored as a token string / JSON object key/value pair in the Redis DB. + +By default, the token itself is hashed and therefore **obfuscated**, this means using the Alias is important to identify token data in analytics and logs. + +#### Where can I get more information? + +A session object is just a JSON object. For more details of each parameter in the session object, see [Tyk Token Session Object Details](/api-management/policies#session-object). 
+
+#### Session Object
+
+```{.copyWrapper}
+{
+  "last_check": 0,
+  "allowance": 1000,
+  "rate": 1000,
+  "per": 1,
+  "expires": 1458669677,
+  "quota_max": 1000,
+  "quota_renews": 1458667309,
+  "quota_remaining": 1000,
+  "quota_renewal_rate": 3600,
+  "access_rights": {
+    "e1d21f942ec746ed416ab97fe1bf07e8": {
+      "api_name": "Closed",
+      "api_id": "e1d21f942ec746ed416ab97fe1bf07e8",
+      "versions": ["Default"],
+      "allowed_urls": null
+    }
+  },
+  "org_id": "53ac07777cbb8c2d53000002",
+  "oauth_client_id": "",
+  "basic_auth_data": {
+    "password": "",
+    "hash_type": ""
+  },
+  "jwt_data": {
+    "secret": ""
+  },
+  "hmac_enabled": false,
+  "hmac_string": "",
+  "is_inactive": false,
+  "apply_policy_id": "",
+  "apply_policies": [
+    "59672779fa4387000129507d",
+    "53222349fa4387004324324e",
+    "543534s9fa4387004324324d"
+  ],
+  "data_expires": 0,
+  "monitor": {
+    "trigger_limits": null
+  },
+  "meta_data": {
+    "test": "test-data"
+  },
+  "tags": ["tag1", "tag2"],
+  "alias": "john@smith.com"
+}
+```
+
+* `last_check` (**deprecated**): No longer used, but this value is related to rate limiting.
+
+* `allowance` (**deprecated**): No longer directly used; this value, on key creation, should be the same as `rate`.
+
+* `rate`: The number of requests that are allowed in the specified rate limiting window.
+
+* `per`: The number of seconds that the rate window should encompass.
+
+* `expires`: A Unix timestamp that defines when the key should expire. You can set this to `0` (zero) if you don't want the key to expire.
+
+* `quota_max`: The maximum number of requests allowed during the quota period.
+
+* `quota_renews`: An epoch that defines when the quota renews.
+
+* `quota_remaining`: The number of requests remaining for this user's quota (unrelated to rate limit).
+
+* `quota_renewal_rate`: The time, in seconds, during which the quota is valid. So for `1000` requests per hour, this value would be `3600` while `quota_max` and `quota_remaining` would be `1000`. 
+
+* `access_rights`: This section is defined in the Access Control section of this documentation; use this section to define what APIs and versions this token has access to.
+
+* `org_id`: The organization this user belongs to; this can be used in conjunction with the `org_id` setting in the API Definition object to have tokens "owned" by organizations. See the Organizations Quotas section of the [Tyk Gateway API](/tyk-gateway-api).
+
+* `oauth_client_id`: This is set by Tyk if the token is generated by an OAuth client during an OAuth authorization flow.
+
+* `basic_auth_data`: This section defines the basic auth password and hashing method.
+
+* `jwt_data`: This section contains a JWT shared secret if the ID matches a JWT ID.
+
+* `hmac_enabled`: If this token belongs to an HMAC user, this will set the token as a valid HMAC provider.
+
+* `hmac_string`: The value of the HMAC shared secret.
+
+* `is_inactive`: Set this value to `true` to deny access.
+
+* `apply_policy_id` (**supported but now deprecated**): The policy ID that is bound to this token.
+
+* `apply_policies`: This replaces `apply_policy_id` and lists your policy IDs as an array. This supports the **Multiple Policy** feature introduced in **v2.4 of the Gateway**.
+
+* `data_expires`: A value, in seconds, that defines when data generated by this token expires in the analytics DB (must be using Pro edition and MongoDB).
+
+* `monitor`: Rate monitor trigger settings, defined elsewhere in the documentation.
+
+* `meta_data`: Metadata to be included as part of the session; this is a key/value string map that can be used in other middleware such as transforms and header injection to embed user-specific data into a request, or alternatively to query the provenance of a key.
+
+* `tags`: Tags are embedded into analytics data when the request completes. If a policy has tags, those tags will supersede the ones carried by the token (they will be overwritten). 
+ +* `alias`: As of v2.1, an Alias offers a way to identify a token in a more human-readable manner, add an Alias to a token in order to have the data transferred into Analytics later on so you can track both hashed and un-hashed tokens to a meaningful identifier that doesn't expose the security of the underlying token. + +### What is a Session Metadata + +As described in [What is a Session Object?](/api-management/policies#what-is-a-session-object), all Tyk tokens can contain a metadata field. This field is a string key/value map that can store any kind of information about the underlying identity of a session. + +The metadata field is important, because it can be used in various ways: + +- to inform an admin of the provenance of a token +- values can be injected into headers for upstream services to consume (e.g. a user ID or an email address provided at the time of creation) +- values can be used in dynamic [JavaScript](/api-management/plugins/javascript#accessing-external-and-dynamic-data) middleware and Virtual Endpoints for further validation or request modification + +Metadata is also injected by other Tyk Components when keys are created using "generative" methods, such as JSON Web Token and OIDC session creation and via the Developer Portal, to include information about the underlying identity of the token when it comes from a third-party such as an OAuth IDP (e.g. OIDC). 
+ +#### Middleware that can use metadata + +Metadata is exposed in several middleware for use in the middleware configuration: + +- [URL Rewrite](/transform-traffic/url-rewriting#pattern) +- [Request Header Transformation](/api-management/traffic-transformation/request-headers#injecting-dynamic-data-into-headers) +- [Response Header Transformation](/api-management/traffic-transformation/request-headers#injecting-dynamic-data-into-headers) +- [Request Body Transformation](/api-management/traffic-transformation/request-body#data-accessible-to-the-middleware) +- [Response Body Transformation](/api-management/traffic-transformation/request-body#data-accessible-to-the-middleware) +- [Virtual Endpoints](/api-management/traffic-transformation/virtual-endpoints) + +You can also access and update metadata from your [custom plugins](/api-management/plugins/overview#). For an example of this, take a look at this [gRPC enabled GO Server](https://github.com/TykTechnologies/tyk-grpc-go-basicauth-jwt). It's a PoC middleware that injects a JWT value into metadata and then accesses it later in the stream. + + +## Set Physical Key Expiry and Deletion +Tyk makes a clear distinction between an API authorization key expiring and being deleted from the Redis storage. + +- When a key expires, it remains in the Redis storage but is no longer valid. Consequently, it is no longer authorized to access any APIs. If a key in Redis has expired and is passed in an API request, Tyk will return `HTTP 401 Key has expired, please renew`. + - When a key is deleted from Redis, Tyk no longer knows about it, so if it is passed in an API request, Tyk will return `HTTP 400 Access to this API has been disallowed`. + +Tyk provides separate control for the expiration and deletion of keys. 
+ +Note that where we talk about keys here, we are referring to [Session Objects](/api-management/policies#what-is-a-session-object), also sometimes referred to as Session Tokens + +### Key expiry + +Tyk's API keys ([token session objects](/api-management/policies#session-object)) have an `expires` field. This is a UNIX timestamp and, when this date/time is reached, the key will automatically expire; any subsequent API request made using the key will be rejected. + +### Key lifetime + +Tyk does not automatically delete keys when they expire. You may prefer to leave expired keys in Redis storage, so that they can be renewed (for example if a user has - inadvisedly - hard coded the key into their application). Alternatively, you may wish to delete keys to avoid cluttering up Redis storage with obsolete keys. + +You have two options for configuring the lifetime of keys when using Tyk: + +1. At the API level +2. At the Gateway level + +#### API-level key lifetime control + +You can configure Tyk to delete keys after a configurable period (lifetime) after they have been created. Simply set the `session_lifetime` field in your API Definition and keys created for that API will automatically be deleted when that period (in seconds) has passed. + +The default value for `session_lifetime` is 0, this is interpreted as an infinite lifetime which means that keys will not be deleted from Redis. + +For example, to have keys live in Redis for only 24 hours (and be deleted 24 hours after their creation) set: + +```{.json} +"session_lifetime": 86400 +``` + + + +There is a risk, when configuring API-level lifetime, that a key will be deleted before it has expired, as `session_lifetime` is applied regardless of whether the key is active or expired. 
To protect against this, you can configure the [session_lifetime_respects_key_expiration](/tyk-oss-gateway/configuration#session_lifetime_respects_key_expiration) parameter in your `tyk.conf`, so that keys that have exceeded their lifetime will not be deleted from Redis until they have expired. + + + +This feature works nicely with [JWT](/basic-config-and-security/security/authentication-authorization/json-web-tokens) or [OIDC](/api-management/client-authentication#integrate-with-openid-connect-deprecated) authentication methods, as the keys are created in Redis the first time they are in use so you know when they will be removed. Be extra careful in the case of keys created by Tyk (Auth token or JWT with individual secrets) and set a long `session_lifetime`, otherwise the user might try to use the key **after** it has already been removed from Redis. + +#### Gateway-level key lifetime control + +You can set a global lifetime for all keys created in the Redis by setting [global_session_lifetime](/tyk-oss-gateway/configuration#global_session_lifetime) in the `tyk.conf` file; this parameter is an integer value in seconds. + +To enable this global lifetime, you must also set the [force_global_session_lifetime](/tyk-oss-gateway/configuration#force_global_session_lifetime) parameter in the `tyk.conf` file. + +#### Summary of key lifetime precedence + +The table below shows the key lifetime assigned for the different permutations of `force_global_session_lifetime` and `session_lifetime_respects_key_expiration` configuration parameters. 
+| `force_global_session_lifetime` | `session_lifetime_respects_key_expiration` | Assigned lifetime | +| :--------------------------------- | :-------------------------------------------- | :------------------------------------------- | +| `true` | `true` | `global_session_lifetime` | +| `true` | `false` | `global_session_lifetime` | +| `false` | `true` | larger of `session_lifetime` or `expires` | +| `false` | `false` | `session_lifetime` | + + + +It is important to remember that a value of `0` in `session_lifetime` or `global_session_lifetime` is interpreted as infinity (i.e. key will not be deleted if that control is in use) - and if a field is not set, this is treated as `0`. +
+If you want the key to be deleted when it expires (i.e. to use the expiry configured in `expires` within the key to control deletion) then you must set a non-zero value in `session_lifetime` and configure both `session_lifetime_respects_key_expiration:true` and `force_global_session_lifetime:false`. +
+ diff --git a/api-management/rate-limit.mdx b/api-management/rate-limit.mdx new file mode 100644 index 000000000..9ab36897b --- /dev/null +++ b/api-management/rate-limit.mdx @@ -0,0 +1,911 @@ +--- +title: "Rate Limiting" +description: "Overview of Rate Limiting with the Tyk Gateway" +keywords: "Rate Limit, Rate Limiting, Rate Limit Algorithms, Distributed Rate Limiter, Redis Rate Limiter, Fixed Window, Spike Arrest, Rate Limit Scope, Local, Local rate Limits, Tyk Classic, Tyk Classic API, Tyk OAS, Tyk OAS API, Rate Limiting, Global limits, Per API limits" +sidebarTitle: "Rate Limiting" +--- + +## Introduction + +API rate limiting is a technique that allows you to control the rate at which clients can consume your APIs and is one of the fundamental aspects of managing traffic to your services. It serves as a safeguard against abuse, overloading, and denial-of-service attacks by limiting the rate at which an API can be accessed. By implementing rate limiting, you can ensure fair usage, prevent resource exhaustion, and maintain system performance and stability, even under high traffic loads. + +## What is rate limiting? + +Rate limiting involves setting thresholds for the maximum number of requests that can be made within a specific time window, such as requests per second, per minute, or per day. Once a client exceeds the defined rate limit, subsequent requests may be delayed, throttled, or blocked until the rate limit resets or additional capacity becomes available. + +## When might you want to use rate limiting? + +Rate limiting may be used as an extra line of defense around attempted denial of service attacks. For instance, if you have load-tested your current system and established a performance threshold that you would not want to exceed to ensure system availability and/or performance then you may want to set a global rate limit as a defense to ensure it hasn't exceeded. 
+ +Rate limiting can also be used to ensure that one particular user or system accessing the API is not exceeding a determined rate. This makes sense in a scenario such as APIs which are associated with a monetization scheme where you may allow so many requests per second based on the tier in which that consumer is subscribed or paying for. + +Of course, there are plenty of other scenarios where applying a rate limit may be beneficial to your APIs and the systems that your APIs leverage behind the scenes. + +## How does rate limiting work? + +At a basic level, when rate limiting is in use, Tyk Gateway will compare the incoming request rate against the configured limit and will block requests that arrive at a higher rate. For example, let’s say you only want to allow a client to call the API a maximum of 10 times per minute. In this case, you would apply a rate limit to the API expressed as "10 requests per 60 seconds". This means that the client will be able to successfully call the API up to 10 times within any 60 second interval (or window) and after for any further requests within that window, the user will get an [HTTP 429 (Rate Limit Exceeded)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429) error response stating the rate limit has been exceeded. + +Tyk's rate limiter is configured using two variables: +- `rate` which is the maximum number of requests that will be permitted during the interval (window) +- `per` which is the length of the interval (window) in seconds + +So for this example you would configure `rate` to 10 (requests) and `per` to 60 (seconds). + +### Rate limiting scopes: API-level vs key-level + +Rate limiting can be applied at different scopes to control API traffic effectively. This section covers the two primary scopes - API-level rate limiting and key-level rate limiting. Understanding the distinctions between these scopes will help you configure appropriate rate limiting policies based on your specific requirements. 
+
+#### API-level rate limiting
+
+API-level rate limiting aggregates the traffic coming into an API from all sources and ensures that the overall rate limit is not exceeded. Overwhelming an endpoint with traffic is an easy and efficient way to execute a denial of service attack. By using an API-level rate limit you can easily ensure that all incoming requests are within a specific limit so excess requests are rejected by Tyk and do not reach your service. You can calculate the rate limit to set by something as simple as having a good idea of the maximum number of requests you could expect from users of your API during a period. You could alternatively apply a more scientific and precise approach by considering the rate of requests your system can handle while still performing at a high level. This limit may be easily determined with some performance testing of your service under load.
+
+#### Key-level rate limiting
+
+Key-level rate limiting is more focused on controlling traffic from individual sources and making sure that users are staying within their prescribed limits. This approach to rate limiting allows you to configure a policy to rate limit in three ways:
+
+- **key-level global limit** limiting the rate of calls the user of a key can make to all APIs authorized by that key
+- **key-level per-API limit** limiting the rate of calls the user of a key can make to specific individual APIs
+- **key-level per-endpoint limit** limiting the rate of calls the user of a key can make to specific individual endpoints of an API
+
+These guides include an explanation of how to configure key-level rate limits when using [API Keys](/api-management/gateway-config-managing-classic#access-an-api) and [Security Policies](/api-management/gateway-config-managing-classic#secure-an-api).
+
+#### Which scope should I use? 
+ +The simplest way to figure out which level of rate limiting you’d like to apply can be determined by asking a few questions: + +- do you want to protect your service against denial of service attacks or overwhelming amounts of traffic from **all users** of the API? **You’ll want to use an API-level rate limit!** +- do you have a health endpoint that consumes very little resource on your service and can handle significantly more requests than your other endpoints? **You'll want to use an API-level per-endpoint rate limit!** +- do you want to limit the number of requests a specific user can make to **all APIs** they have access to? **You’ll want to use a key-level global rate limit!** +- do you want to limit the number of requests a specific user can make to **specific APIs** they have access to? **You’ll want to use a key-level per-API rate limit.** +- do you want to limit the number of requests a specific user can make to a **specific endpoint of an API** they have access to? **You’ll want to use a key-level per-endpoint rate limit.** + +### Applying multiple rate limits + +When multiple rate limits are configured, they are assessed in this order (if applied): + +1. API-level per-endpoint rate limit (configured in API definition) +2. API-level rate limit (configured in API definition) +3. Key-level per-endpoint rate limit (configured in access key) +4. Key-level per-API rate limit (configured in access key) +5. Key-level global rate limit (configured in access key) + +### Combining multiple policies configuring rate limits + +If more than one policy defining a rate limit is applied to a key then Tyk will apply the highest request rate permitted by any of the policies that defines a rate limit. + +If `rate` and `per` are configured in multiple policies applied to the same key then the Gateway will determine the effective rate limit configured for each policy and apply the highest to the key. 
+
+Given policy A with `rate` set to 90 and `per` set to 30 seconds (3rps), and policy B with `rate` set to 100 and `per` set to 10 seconds (10rps): if both are applied to a key, Tyk will take the rate limit from policy B as it results in a higher effective request rate (10rps).
+
+
+
+Prior to Tyk 5.4.0 there was a long-standing bug in the calculation of the effective rate limit applied to the key where Tyk would combine the highest `rate` and highest `per` from the policies applied to the key, so for the example above the key would have `rate` set to 100 and `per` set to 30 giving an effective rate limit of 3.33rps. This has now been corrected.
+
+
+
+## Rate limiting algorithms
+
+Different rate limiting algorithms are employed to cater to varying requirements, use cases and gateway deployments. A one-size-fits-all approach may not be suitable, as APIs can have diverse traffic patterns, resource constraints, and service level objectives. Some algorithms are more suited to protecting the upstream service from overload whilst others are suitable for per-client limiting to manage and control fair access to a shared resource.
+
+Tyk offers the following rate limiting algorithms:
+
+1. [Distributed Rate Limiter](#distributed-rate-limiter): recommended for most use cases, implements the [token bucket algorithm](https://en.wikipedia.org/wiki/Token_bucket)
+2. [Redis Rate Limiter](#redis-rate-limiter): implements the [sliding window log algorithm](https://developer.redis.com/develop/dotnet/aspnetcore/rate-limiting/sliding-window)
+3. [Fixed Window Rate Limiter](#fixed-window-rate-limiter): implements the [fixed window algorithm](https://redis.io/learn/develop/dotnet/aspnetcore/rate-limiting/fixed-window)
+
+When the rate limits are reached, Tyk will block requests with an [HTTP 429 (Rate Limit Exceeded)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429) response. 
+ + + +Tyk supports selection of the rate limit algorithm at the Gateway level, so the same algorithm will be applied to all APIs. +It can be configured to switch dynamically between two algorithms depending on the request rate, as explained [here](#dynamic-algorithm-selection-based-on-request-rate). + + + +### Distributed Rate Limiter + +The Distributed Rate Limiter (DRL) is the default rate limiting mechanism in Tyk Gateway. It is +implemented using a token bucket implementation that does not use Redis. +In effect, it divides the configured rate limit between the number of +addressable gateway instances. + +The characteristics of DRL are: + +- a rate limit of 100 requests/min with 2 gateways yields 50 requests/min per gateway +- unreliable at low rate limits where requests are not fairly balanced + +DRL can face challenges in scenarios where traffic is not evenly +distributed across gateways, such as with sticky sessions or keepalive +connections. These conditions can lead to certain gateways becoming +overloaded while others remain underutilized, compromising the +effectiveness of configured rate limiting. This imbalance is particularly +problematic in smaller environments or when traffic inherently favors +certain gateways, leading to premature rate limits on some nodes and +excess capacity on others. + +DRL will be used automatically unless one of the other rate limit +algorithms are explicitly enabled via configuration. + +It's important to note that this algorithm will yield approximate results due to the nature of the local +rate limiting, where the total allowable request rate is split between the gateways; uneven distribution +of requests could lead to exhaustion of the limit on some gateways before others. + +### Redis Rate Limiter + +This algorithm implements a sliding window log algorithm and can be enabled via the [enable_redis_rolling_limiter](/tyk-oss-gateway/configuration#enable_redis_rolling_limiter) configuration option. 
+ +The characteristics of the Redis Rate Limiter (RRL) are: + +- using Redis lets any gateway respect a cluster-wide rate limit (shared counter) +- a record of each request, including blocked requests that return `HTTP 429`, is written to the sliding log in Redis +- the log is constantly trimmed to the duration of the defined window +- requests are blocked if the count in the log exceeds the configured rate limit + +An important behavior of this rate limiting algorithm is that it blocks +access to the API when the rate exceeds the rate limit and does not let +further API calls through until the rate drops below the specified rate +limit. For example, if the configured rate limit is 3000 requests/minute the call rate would +have to be reduced below 3000 requests/minute for a whole minute before the `HTTP 429` +responses stop and traffic is resumed. This behavior is called **spike arrest**. + +The complete request log is stored in Redis so resource usage when using this rate limiter is high. +This algorithm will use significant resources on Redis even when blocking requests, as it must +maintain the request log, mostly impacting CPU usage. Redis resource +usage increases with traffic therefore shorter `per` values are recommended to +limit the amount of data being stored in Redis. + +If you wish to avoid spike arrest behavior but the DRL is not suitable, you might use the [Fixed Window Rate Limiter](#fixed-window-rate-limiter) algorithm. + +You can configure [Rate Limit Smoothing](#rate-limit-smoothing) to manage the traffic spike, allowing time to increase upstream capacity if required. + +The [Redis Sentinel Rate Limiter](#redis-sentinel-rate-limiter) reduces latency for clients, however increases resource usage on Redis and Tyk Gateway. + +#### Rate Limit Smoothing + +Rate Limit Smoothing is an optional mechanism of the RRL that dynamically adjusts the request +rate limit based on the current traffic patterns. 
It helps in managing
+request spikes by gradually increasing or decreasing the rate limit
+instead of making abrupt changes or blocking requests excessively.
+
+This mechanism uses the concept of an intermediate *current allowance* (rate limit) that moves between an initial lower
+bound (`threshold`) and the maximum configured request rate (`rate`). As the request rate approaches the
+*current allowance*, Tyk will emit an event to notify you that smoothing has been triggered. When the event is emitted,
+the *current allowance* will be increased by a defined increment (`step`). A hold-off counter (`delay`) must expire
+before another event is emitted and the *current allowance* further increased. If the request rate exceeds the
+*current allowance* then the rate limiter will block further requests, returning `HTTP 429` as usual.
+
+As the request rate falls following the spike, the *current allowance* will gradually reduce back to the lower bound (`threshold`).
+
+Events are emitted and adjustments made to the *current allowance* based on the following calculations:
+
+- when the request rate rises above `current allowance - (step * trigger)`,
+  a `RateLimitSmoothingUp` event is emitted and *current allowance* increases by `step`.
+- when the request rate falls below `allowance - (step * (1 + trigger))`,
+  a `RateLimitSmoothingDown` event is emitted and *current allowance* decreases by `step`. 
+ +##### Configuring rate limit smoothing + +When Redis Rate Limiter is in use, rate limit smoothing is configured with the following options within the `smoothing` object alongside the standard `rate` and `per` parameters: + +- `enabled` (boolean) to enable or disable rate limit smoothing +- `threshold` is the initial rate limit (*current allowance*) beyond which smoothing will be applied +- `step` is the increment by which the *current allowance* will be increased or decreased each time a smoothing event is emitted +- `trigger` is a fraction (typically in the range 0.1-1.0) of the `step` at which point a smoothing event will be emitted as the request rate approaches the *current allowance* +- `delay` is a hold-off between smoothing events and controls how frequently the current allowance will step up or down (in seconds). + +Rate Limit Smoothing is configured using the `smoothing` object within access keys and policies. For API-level rate limiting, this configuration is within the `access_rights[*].limit` object. + +An example configuration would be as follows: + +```yaml + "smoothing": { + "enabled": true, + "threshold": 5, + "trigger": 0.5, + "step": 5, + "delay": 30 + } +``` + +#### Redis Sentinel Rate Limiter + +The Redis Sentinel Rate Limiter option will: + +- write a sentinel key into Redis when the request limit is reached +- use the sentinel key to block requests immediately for `per` duration +- requests, including blocked requests, are written to the sliding log in a background thread + +This optimizes the latency for connecting clients, as they don't have to +wait for the sliding log write to complete. This algorithm exhibits spike +arrest behavior the same as the basic Redis Rate Limiter, however recovery may take longer as the blocking is in +effect for a minimum of the configured window duration (`per`). Gateway and Redis +resource usage is increased with this option. 
+ +This option can be enabled using the following configuration option +[enable_sentinel_rate_limiter](/tyk-oss-gateway/configuration#enable_sentinel_rate_limiter). + +To optimize performance, you may configure your rate limits with shorter +window duration values (`per`), as that will cause Redis to hold less +data at any given moment. + +Performance can be improved by enabling the [enable_non_transactional_rate_limiter](/tyk-oss-gateway/configuration#enable_non_transactional_rate_limiter). This leverages Redis Pipelining to enhance the performance of the Redis operations. Please consult the [Redis documentation](https://redis.io/docs/manual/pipelining/) for more information. + +Please consider the [Fixed Window Rate Limiter](#fixed-window-rate-limiter) algorithm as an alternative, if Redis performance is an issue. + +### Fixed Window Rate Limiter + +The Fixed Window Rate Limiter will limit the number of requests in a +particular window in time. Once the defined rate limit has been reached, +the requests will be blocked for the remainder of the configured window +duration. After the window expires, the counters restart and again allow +requests through. + +- the implementation uses a single counter value in Redis +- the counter expires after every configured window (`per`) duration. + +The implementation does not smooth out traffic bursts within a window. For any +given `rate` in a window, the requests are processed without delay, until +the rate limit is reached and requests are blocked for the remainder of the +window duration. + +When using this option, resource usage for rate limiting does not +increase with traffic. A simple counter with expiry is created for every +window and removed when the window elapses. Regardless of the traffic +received, Redis is not impacted in a negative way, resource usage remains +constant. 
+ +This algorithm can be enabled using the following configuration option [enable_fixed_window_rate_limiter](/tyk-oss-gateway/configuration#enable_fixed_window_rate_limiter). + +If you need spike arrest behavior, the [Redis Rate Limiter](#redis-rate-limiter) should be used. + +### Dynamic algorithm selection based on request rate + +The Distributed Rate Limiter (DRL) works by distributing the +rate allowance equally among all gateways in the cluster. For example, +with a rate limit of 1000 requests per second and 5 gateways, each +gateway can handle 200 requests per second. This distribution allows for +high performance as gateways do not need to synchronize counters for each +request. + +DRL assumes an evenly load-balanced environment, which is typically +achieved at a larger scale with sufficient requests. In scenarios with +lower request rates, DRL may generate false positives for rate limits due +to uneven distribution by the load balancer. For instance, with a rate of +10 requests per second across 5 gateways, each gateway would handle only +2 requests per second, making equal distribution unlikely. + +It's possible to configure Tyk to switch automatically between the Distributed Rate Limiter +and the Redis Rate Limiter by setting the `drl_threshold` configuration. + +This threshold value is used to dynamically switch the rate-limiting +algorithm based on the volume of requests. This option sets a +minimum number of requests per gateway that triggers the Redis Rate +Limiter. For example, if `drl_threshold` is set to 2, and there are 5 +gateways, the DRL algorithm will be used if the rate limit exceeds 10 +requests per second. If it is 10 or fewer, the system will fall back to +the Redis Rate Limiter. + +See [DRL Threshold](/tyk-oss-gateway/configuration#drl_threshold) for details on how to configure this feature. 
+ + +## Custom Rate Limiting + +Different business models may require applying rate limits and quotas not only by credentials but also by other entities, such as per application, per developer, per organization, etc. For example, if an API Product is sold to a B2B customer, the quota of API calls is usually applied to all developers and their respective applications combined, in addition to a specific credential. + +To enable this, Tyk introduced support for custom rate limit keys in [Tyk 5.3.0](/developer-support/release-notes/dashboard#530-release-notes). This feature allows you to define custom patterns for rate limiting that go beyond the default credential-based approach. + +### How Custom Rate Limiting Works + +Custom rate limit keys are applied at a policy level. When a custom rate limit key is specified, quota, rate limit and throttling will be calculated against the specified value and not against a credential ID. + +To specify a custom rate limit key, add to a policy a new metadata field called `rate_limit_pattern`. In the value field you can specify any value or expression that you want to use as a custom rate limit key for your APIs. + +The `rate_limit_pattern` field supports: +- Referencing session metadata using `$tyk_meta.FIELD_NAME` syntax +- Concatenating multiple values together using the pipe operator (`|`) + +### Configuring Custom Rate Limit Keys + +Custom rate limit keys are configured in the Tyk Dashboard by adding a metadata field to your policy: + +1. Navigate to your policy in the Tyk Dashboard +2. Add a new metadata field called `rate_limit_pattern` +3. 
Set the value to your desired pattern expression + +For example, if you want to specify a rate limit pattern to calculate the rate limit for a combination of developers and plans, where all credentials of a developer using the same plan share the same rate limit, you can use the following expression (assuming that the `DeveloperID` and `PlanID` metadata fields are available in a session): + +```gotemplate +$tyk_meta.DeveloperID|$tyk_meta.PlanID +``` + +Configuring custom rate limit keys + +### Important Considerations + + + +**Updating credential metadata** + +The custom rate limit key capability uses only metadata objects, such as credentials metadata available in a session. Therefore, if the `rate_limit_pattern` relies on credentials metadata, this capability will work only if those values are present. If, after evaluating the `rate_limit_pattern`, its value is equal to an empty string, the rate limiter behavior defaults to rate limiting by credential IDs. + + + +### Advanced Custom Rate Limiting + +As mentioned above, we can easily configure custom rate limit keys for simple scenarios that do not require awareness of the request context. When more complex logic or integration with external services is required to determine the rate-limiting key, for example when you want to rate limit per requester IP address, a [custom authentication plugin](/api-management/plugins/plugin-types#authentication-plugins) can be used to identify and generate the rate limiter key. + +We use an authentication plugin because [it lets us modify the session object](/api-management/plugins/plugin-types#hook-capabilities). + +
+ + + +This mechanism works only for authenticated APIs, since the authentication plugin is not invoked for unauthenticated (keyless) APIs. + + + +#### Example: Rate Limiting by IP Address + +The example below shows an IP based rate limiter implemented as a custom Go plugin for a Tyk OAS API. Note that the Go library function to [obtain the API definition](/api-management/plugins/golang#accessing-the-api-definition) is specific to Tyk OAS APIs, so you would need to modify the plugin for a Tyk Classic API. + +1. It extracts the client's IP address from the request +2. Creates a session object with rate limiting parameters (2 requests per 5 seconds) +3. Sets a custom `rate_limit_pattern` in the session's metadata to use the IP as the rate limiting key +4. Stores this session in Tyk's session store + +When Tyk processes subsequent requests, it uses the IP address as the rate-limiting key, allowing you to rate-limit by IP address. + + + +```go +// IP Rate Limiter for Tyk OAS APIs +func Authenticate(rw http.ResponseWriter, r *http.Request) { + // Get the API definition + requestedAPI := ctx.GetOASDefinition(r) + if requestedAPI == nil { + logger.Error("Could not get Tyk OAS API Definition") + rw.WriteHeader(http.StatusInternalServerError) + return + } + + // Extract the client's real IP address + realIp := request.RealIP(r) + + // Create a session object with rate limiting parameters + sessionObject := &user.SessionState{} + sessionObject = &user.SessionState{ + OrgID: requestedAPI.OrgID, + Rate: 2, // Allow 2 requests + Per: 5, // Per 5 seconds + AccessRights: map[string]user.AccessDefinition{ + requestedAPI.APIID: { + APIID: requestedAPI.APIID, + }, + }, + MetaData: map[string]interface{}{ + "rate_limit_pattern": realIp, // Use IP address as rate limit key + }, + } + + logger.Info("Session Alias: ", sessionObject.Alias) + + // Set session state using session object + ctx.SetSession(r, sessionObject, false) + logger.Info("Session created for request") +} +``` + +#### 
How to Use This + +1. **Build and Deploy plugin**: Build the plugin and deploy it to your Tyk Gateway. Refer to the [Go Plugin Development Guide](/api-management/plugins/golang#setting-up-your-environment) for instructions on building and deploying Go plugins. + +2. **Configure your API**: Create an authenticated API and set up your API to use the [custom authentication plugin](/api-management/plugins/golang#loading-custom-go-plugins-into-tyk). Note that you will need to select the [multiple authentication](/basic-config-and-security/security/authentication-authorization/multiple-auth) method to invoke both the plugin and your chosen auth method. + +3. **Test your implementation**: Make requests to your API and verify that rate limiting is applied based on client IP addresses. + +While this example demonstrates IP-based rate limiting, you can modify the `rate_limit_pattern` to use any value you want as the rate limiting key, such as: +- A specific header value: `request.Header.Get("X-Custom-ID")` +- A combination of values: `userID + "-" + deviceID` +- A value extracted from the request body or JWT claims + +## Rate Limiting Layers + +You can protect your upstream services from being flooded with requests by configuring rate limiting in Tyk Gateway. Rate limits in Tyk are configured using two parameters: allow `rate` requests in any `per` time period (given in seconds). + +As explained in the [Rate Limiting Concepts](/api-management/rate-limit#introduction) section, Tyk supports configuration of rate limits at both the API-Level and Key-Level for different use cases. + +The API-Level rate limit takes precedence over Key-Level, if both are configured for a given API, since this is intended to protect your upstream service from becoming overloaded. The Key-Level rate limits provide more granular control for managing access by your API clients. 
+ +### Configuring the rate limiter at the API-Level + +If you want to protect your service with an absolute limit on the rate of requests, you can configure an API-level rate limit. You can do this from the API Designer in Tyk Dashboard as follows: + +1. Navigate to the API for which you want to set the rate limit +2. From the **Core Settings** tab, navigate to the **Rate Limiting and Quotas** section +3. Ensure that **Disable rate limiting** is not selected +4. Enter in your **Rate** and **Per (seconds)** values +5. **Save/Update** your changes + +Tyk will now accept a maximum of **Rate** requests in any **Per** period to the API and will reject further requests with an `HTTP 429 Too Many Requests` error. + +Check out the following video to see this being done. + + + +### Configuring the rate limiter at the Key-Level + +If you want to restrict an API client to a certain rate of requests to your APIs, you can configure a Key-Level rate limit via a [Security Policy](/api-management/policies). The allowance that you configure in the policy will be consumed by any requests made to APIs using a key generated from the policy. Thus, if a policy grants access to three APIs with `rate=15 per=60` then a client using a key generated from that policy will be able to make a total of 15 requests - to any combination of those APIs - in any 60 second period before receiving the `HTTP 429 Too Many Requests` error. + + + +It is assumed that the APIs being protected with a rate limit are using the [auth token](/api-management/authentication/bearer-token) client authentication method and policies have already been created. + + + +You can configure this rate limit from the API Designer in Tyk Dashboard as follows: + +1. Navigate to the Tyk policy for which you want to set the rate limit +2. Ensure that API(s) that you want to apply rate limits to are selected +3. 
Under **Global Limits and Quota**, make sure that **Disable rate limiting** is not selected and enter your **Rate** and **Per (seconds)** values +4. **Save/Update** the policy + +### Setting up a Key-Level Per-API rate limit + +If you want to restrict API clients to a certain rate of requests for a specific API you will also configure the rate limiter via the security policy. However this time you'll assign per-API limits. The allowance that you configure in the policy will be consumed by any requests made to that specific API using a key generated from that policy. Thus, if a policy grants access to an API with `rate=5 per=60` then three clients using keys generated from that policy will each independently be able to make 5 requests in any 60 second period before receiving the `HTTP 429 Too Many Requests` error. + + + +It is assumed that the APIs being protected with a rate limit are using the [auth token](/api-management/authentication/bearer-token) client authentication method and policies have already been created. + + + +You can configure this rate limit from the API Designer in Tyk Dashboard as follows: + +1. Navigate to the Tyk policy for which you want to set the rate limit +2. Ensure that API that you want to apply rate limits to is selected +3. Under **API Access**, turn on **Set per API Limits and Quota** +4. You may be prompted with "Are you sure you want to disable partitioning for this policy?". Click **CONFIRM** to proceed +5. Under **Rate Limiting**, make sure that **Disable rate limiting** is not selected and enter your **Rate** and **Per (seconds)** values +6. **Save/Update** the policy + +Check out the following video to see this being done. + + + +### Setting up a key-level per-endpoint rate limit + +To restrict the request rate for specific API clients on particular endpoints, you can use the security policy to assign per-endpoint rate limits. 
These limits are set within the policy and will be enforced for any requests made to that endpoint by clients using keys generated from that policy.
+
+Each key will have its own independent rate limit allowance. For example, if a policy grants access to an endpoint with a rate limit of 5 requests per 60 seconds, each client with a key from that policy can make 5 requests to the endpoint in any 60-second period. Once the limit is reached, the client will receive an HTTP `429 Too Many Requests` error.
+
+If no per-endpoint rate limit is defined, the endpoint will inherit the key-level per-API rate limit or the global rate limit, depending on what is configured.
+
+
+
+The following assumptions are made:
+ - The [ignore authentication](/api-management/traffic-transformation/ignore-authentication) middleware should not be enabled for the relevant endpoints.
+ - If [path-based permissions](/api-management/gateway-config-managing-classic#path-based-permissions) are configured, they must grant access to these endpoints for keys generated from the policies.
+
+
+
+You can configure per-endpoint rate limits from the API Designer in Tyk Dashboard as follows:
+
+1. Navigate to the Tyk policy for which you want to set the rate limit
+2. Ensure that the API that you want to apply rate limits to is selected
+3. Under **API Access** -> **Set endpoint-level usage limits** click on **Add Rate Limit** to configure the rate limit. You will need to provide the rate limit and the endpoint path and method.
+4. **Save/Update** the policy
+
+
+### Setting Rate Limits in the Tyk Community Edition Gateway (CE)
+
+#### Configuring the rate limiter at the (Global) API-Level
+
+Using the `global_rate_limit` field in the API definition you can specify the API-level rate limit in the following format: `{"rate": 10, "per": 60}`. 
+ +An equivalent example using Tyk Operator is given below: + +```yaml {linenos=table,hl_lines=["14-17"],linenostart=1} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-global-rate-limit +spec: + name: httpbin-global-rate-limit + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + # setting a global rate-limit for the API of 10 requests per 60 seconds + global_rate_limit: + rate: 10 + per: 60 +``` + +### Configuring the rate limiter on the session object + +All actions on the session object must be done via the Gateway API. + +1. Ensure that `allowance` and `rate` are set to the same value: this should be number of requests to be allowed in a time period, so if you wanted 100 requests every second, set this value to 100. + +2. Ensure that `per` is set to the time limit. Again, as in the above example, if you wanted 100 requests per second, set this value to 1. If you wanted 100 requests per 5 seconds, set this value to 5. + +#### Can I disable the rate limiter? + +Yes, the rate limiter can be disabled for an API Definition by selecting **Disable Rate Limits** in the API Designer, or by setting the value of `disable_rate_limit` to `true` in your API definition. + +Alternatively, you could also set the values of `Rate` and `Per (Seconds)` to be 0 in the API Designer. + + + +Disabling the rate limiter at the API-Level does not disable rate limiting at the Key-Level. Tyk will enforce the Key-Level rate limit even if the API-Level limit is not set. + + + +#### Can I set rate limits by IP address? + +Not yet, though IP-based rate limiting is possible using custom pre-processor middleware JavaScript that generates tokens based on IP addresses. See our [Middleware Scripting Guide](/api-management/plugins/javascript#using-javascript-with-tyk) for more details. 
+ +## Rate Limiting by API + +### Tyk Classic API Definition + +The per-endpoint rate limit middleware allows you to enforce rate limits on specific endpoints. This middleware is configured in the Tyk Classic API Definition, either via the Tyk Dashboard API or in the API Designer. + +To enable the middleware, add a new `rate_limit` object to the `extended_paths` section of your API definition. + +The `rate_limit` object has the following configuration: + +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `enabled`: boolean to enable or disable the rate limit +- `rate`: the maximum number of requests that will be permitted during the interval (window) +- `per`: the length of the interval (window) in seconds + +You can set different rate limits for various endpoints by specifying multiple `rate_limit` objects. + +#### Simple endpoint rate limit + +For example: + +```json {linenos=true, linenostart=1} +{ + "use_extended_paths": true, + "extended_paths": { + "rate_limit": [ + { + "path": "/anything", + "method": "GET", + "enabled": true, + "rate": 60, + "per": 1 + } + ] + } +} +``` + +In this example, the rate limit middleware has been configured for HTTP +`GET` requests to the `/anything` endpoint, limiting requests to 60 per +second. + +#### Advanced endpoint rate limit + +For more complex scenarios, you can configure rate limits for multiple +paths. The order of evaluation matches the order defined in the +`rate_limit` array. 
For example, if you wanted to limit the rate of
+`POST` requests to your API allowing a higher rate to one specific
+endpoint you could configure the API definition as follows:
+
+```json {linenos=true, linenostart=1}
+{
+  "use_extended_paths": true,
+  "extended_paths": {
+    "rate_limit": [
+      {
+        "path": "/user/login",
+        "method": "POST",
+        "enabled": true,
+        "rate": 100,
+        "per": 1
+      },
+      {
+        "path": "/.*",
+        "method": "POST",
+        "enabled": true,
+        "rate": 60,
+        "per": 1
+      }
+    ]
+  }
+}
+```
+
+In this example, the first rule limits `POST` requests to `/user/login`
+to 100 requests per second (rps). Any other `POST` request matching the
+regex pattern `/.*` will be limited to 60 requests per second. The order
+of evaluation ensures that the specific `/user/login` endpoint is matched
+and evaluated before the regex pattern.
+
+The per-endpoint rate limit middleware allows you to enforce rate limits on specific endpoints. This middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation), either via the Tyk Dashboard API or in the API Designer.
+
+If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/rate-limit#tyk-classic-api-definition) page.
+
+### Tyk OAS API Definition
+
+The design of the Tyk OAS API Definition takes advantage of the
+`operationId` defined in the OpenAPI Document that declares both the path
+and method for which the middleware should be added. Endpoint `paths`
+entries (and the associated `operationId`) can contain wildcards in the
+form of any string bracketed by curly braces, for example
+`/status/{code}`. These wildcards are purely descriptive: they are
+human-readable and do not translate to variable names. Under the hood,
+a wildcard translates to the "match everything" regex of: `(.*)`. 
+ +The rate limit middleware (`rateLimit`) can be added to the `operations` section of the +Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition +for the appropriate `operationId` (as configured in the `paths` section +of your OpenAPI Document). + +The `rateLimit` object has the following configuration: + +- `enabled`: enable the middleware for the endpoint +- `rate`: the maximum number of requests that will be permitted during the interval (window) +- `per`: the length of the interval (window) in time duration notation (e.g. 10s) + +#### Simple endpoint rate limit + +For example: + +```json {hl_lines=["39-43"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-rate-limit", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/status/200": { + "get": { + "operationId": "status/200get", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-rate-limit", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-rate-limit/", + "strip": true + } + }, + "middleware": { + "operations": { + "status/200get": { + "rateLimit": { + "enabled": true, + "rate": 60, + "per": "1s" + } + } + } + } + } +} +``` + +In this example, a rate limit has been configured for the `GET +/status/200` endpoint, limiting requests to 60 per second. + +The configuration above is a complete and valid Tyk OAS API Definition +that you can import into Tyk to try out the Per-endpoint Rate Limiter +middleware. + +#### Advanced endpoint rate limit + +For more complex scenarios, you can configure rate limits for multiple +paths. The order of evaluation matches the order that endpoints are +defined in the `paths` section of the OpenAPI description. 
For example, +if you wanted to limit the rate of `POST` requests to your API allowing a +higher rate to one specific endpoint you could configure the API +definition as follows: + +```json {hl_lines=["49-53", "56-60"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "advanced-rate-limit", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/user/login": { + "post": { + "operationId": "user/loginpost", + "responses": { + "200": { + "description": "" + } + } + } + }, + "/{any}": { + "post": { + "operationId": "anypost", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "advanced-rate-limit", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/advanced-rate-limit/", + "strip": true + } + }, + "middleware": { + "operations": { + "user/loginpost": { + "rateLimit": { + "enabled": true, + "rate": 100, + "per": "1s" + } + }, + "anypost": { + "rateLimit": { + "enabled": true, + "rate": 60, + "per": "1s" + } + } + } + } + } +} +``` + +In this example, the first rule limits requests to the `POST /user/login` +endpoint to 100 requests per second (rps). Any other `POST` request to an +endpoint path that matches the regex pattern `/{any}` will be limited to +60 rps. The order of evaluation ensures that the specific `POST +/user/login` endpoint is matched and evaluated before the regex pattern. + +The configuration above is a complete and valid Tyk OAS API Definition +that you can import into Tyk to try out the Per-endpoint Rate Limiter +middleware. + +### Configuring the middleware in the API Designer + +Configuring per-endpoint rate limits for your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. 
+
+   Tyk OAS API Designer showing no endpoints created
+
+   Adding an endpoint to an API using the Tyk OAS API Designer
+
+   Tyk OAS API Designer showing no middleware enabled on endpoint
+
+2. **Select the Rate Limit middleware**
+
+   Select **ADD MIDDLEWARE** and choose **Rate Limit** from the *Add Middleware* screen.
+
+   Adding the Rate Limit middleware
+
+3. **Configure the middleware**
+
+   You must provide the rate limit for the endpoint: the maximum number of requests (**Rate**) that will be permitted during the configured interval (**Per**).
+
+   Configuring the per-endpoint rate limit
+
+4. **Save the API**
+
+   Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes.
+
+## Rate Limiting with Tyk Streams
+
+A rate limit is a strategy for limiting the usage of a shared resource across parallel components in a Tyk Streams instance, or potentially across multiple instances. They are configured as a resource:
+
+```yaml
+rate_limit_resources:
+  - label: foobar
+    local:
+      count: 500
+      interval: 1s
+```
+
+And most components that hit external services have a field `rate_limit` for specifying a rate limit resource to use. For example, if we wanted to use our `foobar` rate limit with an HTTP request:
+
+```yaml
+input:
+  http_client:
+    url: TODO
+    verb: GET
+    rate_limit: foobar
+```
+
+By using a rate limit in this way we can guarantee that our input will only poll our HTTP source at the rate of 500 requests per second.
+
+{/* TODO: when rate-limit processor supported:
+Some components don't have a `rate_limit` field but we might still wish to throttle them by a rate limit, in which case we can use the rate_limit processor that applies back pressure to a processing pipeline when the limit is reached. 
For example: + +```yaml +input: + http_server: + path: /post +output: + http_server: + ws_path: /subscribe +pipeline: + processors: + - rate_limit: + resource: example_rate_limit +rate_limit_resources: + - label: example_rate_limit + local: + count: 3 + interval: 20s +``` */} + + +### Local + +The local rate limit is a simple X every Y type rate limit that can be shared across any number of components within the pipeline but does not support distributed rate limits across multiple running instances of Tyk Streams. + +```yml +# Config fields, showing default values +label: "" +local: + count: 1000 + interval: 1s +``` + +**Configuration Fields** + +**count** + +The maximum number of requests to allow for a given period of time. + + +Type: `int` +Default: `1000` + +**interval** + +The time window to limit requests by. + + +Type: `string` +Default: `"1s"` diff --git a/api-management/request-quotas.mdx b/api-management/request-quotas.mdx new file mode 100644 index 000000000..5c8dfd7ac --- /dev/null +++ b/api-management/request-quotas.mdx @@ -0,0 +1,773 @@ +--- +title: "Request Quotas" +description: "Overview of Rate Quotas with the Tyk Gateway" +keywords: "Request Quotas, API Quotas, Usage Limits, Consumption Control" +sidebarTitle: "Request Quotas" +--- + +## Introduction + +Request Quotas in Tyk Gateway allow you to set a maximum number of API requests for a specific API key or Policy over longer, defined periods (e.g., day, week, month). This feature is distinct from rate limiting (which controls requests per second), and it is essential for managing API consumption, enforcing service tiers, and protecting your backend services from sustained overuse over time. 
+ +```mermaid +flowchart LR + Client[API Client] -->|Makes Request| Gateway[Tyk Gateway] + Gateway -->|Check Quota| Redis[(Redis)] + Redis -->|Quota OK| Gateway + Redis -->|Quota Exceeded| Gateway + Gateway -->|If Quota OK| Upstream[Upstream API] + Gateway -->|If Quota Exceeded| Reject[Reject Request] + Upstream -->|Response| Gateway + Gateway -->|Response| Client +``` + +### Key Benefits + +* **Enforce Usage Limits:** Cap the total number of requests allowed over extended periods (days, weeks, months) per consumer. +* **Implement Tiered Access:** Easily define different usage allowances for various subscription plans (e.g., Free, Basic, Pro). +* **Protect Backend Services:** Prevent individual consumers from overwhelming upstream services with consistently high volume over long durations. +* **Enable Usage-Based Monetization:** Provide a clear mechanism for charging based on consumption tiers. + +--- +## Quick Start + +### Overview + +In this tutorial, we will configure Request Quotas on a Tyk Security Policy to limit the number of requests an API key can make over a defined period. Unlike rate limits (requests per second), quotas control overall volume. We'll set a low quota limit with a short renewal period for easy testing, associate a key with the policy, observe blocked requests once the quota is exhausted, and verify that the quota resets after the period elapses. This guide primarily uses the Tyk Dashboard for configuration. + +### Prerequisites + +- **Working Tyk Environment:** You need access to a running Tyk instance that includes both the Tyk Gateway and Tyk Dashboard components. For setup instructions using Docker, please refer to the [Tyk Quick Start](https://github.com/TykTechnologies/tyk-pro-docker-demo?tab=readme-ov-file#quick-start). +- **Curl, Seq and Sleep**: These tools will be used for testing. + +### Instructions + +#### Create an API + +1. **Create an API:** + 1. Log in to your Tyk Dashboard. + 2. Navigate to **API Management > APIs** + 3. 
Click **Add New API** + 4. Click **Import** + 5. Select **Import Type** as **Tyk API** + 6. Copy the below Tyk OAS definition in the text box and click **Import API** to create an API. + + + + + ```json + { + "components": { + "securitySchemes": { + "authToken": { + "in": "header", + "name": "Authorization", + "type": "apiKey" + } + } + }, + "info": { + "title": "Request Quota Test", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "security": [ + { + "authToken": [] + } + ], + "servers": [ + { + "url": "http://tyk-gateway.localhost:8080/request-quota-test/" + } + ], + "x-tyk-api-gateway": { + "info": { + "name": "Request Quota Test", + "state": { + "active": true + } + }, + "middleware": { + "global": { + "contextVariables": { + "enabled": true + }, + "trafficLogs": { + "enabled": true + } + } + }, + "server": { + "authentication": { + "enabled": true, + "securitySchemes": { + "authToken": { + "enabled": true + } + } + }, + "listenPath": { + "strip": true, + "value": "/request-quota-test/" + } + }, + "upstream": { + "url": "http://httpbin.org/" + } + } + } + ``` + + + + +#### Configure Policy and Quota + +2. **Create and Configure a Security Policy with a Request Quota:** + + + + + 1. Navigate to **API Security > Policies** in the Tyk Dashboard sidebar. + 2. Click the **Add Policy** button. + 3. Under the **1. Access Rights** tab, in the **Add API Access Rule** section, select the `Request Quota Test` API. + 4. Scroll down to the **Global Limits and Quota** section (still under the **1. Access Rights** tab): + * **Important:** Disable **Rate Limiting** by selecting **Disable rate limiting** option, so it doesn't interfere with testing the quota. + * Set the following values for `Usage Quotas`: + * Uncheck the `Unlimited requests` checkbox + * Enter `10` into the **Max Requests per period** field. (This is our low quota limit for testing). + * Select `1 hour` from the **Quota resets every:** dropdown. 
(In the next step, we will modify it to 60 seconds via API for quick testing, as 1 hour is a very long period. From the dashboard, we can only select pre-configured options.)
+    5. Select the **2. Configuration** tab.
+    6. In the **Policy Name** field, enter `Request Quota Policy`.
+    7. From the **Key expire after** dropdown, select `1 hour`.
+    8. Click the **Create Policy** button.
+
+
+
+    policy with request quota configured
+
+3. **Update Quota Reset Period via API:**
+
+    As the Dashboard UI doesn't allow setting a shorter duration, we will set the Quota reset period to a value of 1 minute for testing purposes. The following commands search for the policy, modify its `quota_renewal_rate` to 60 seconds, and update the API.
+
+    **Note:** Obtain your Dashboard API key by clicking on the User profile at the top right corner, then click on `Edit Profile`, and select the key available under `Tyk Dashboard API Access Credentials`. Now in the below command replace `<dashboard-api-key>` with the API key you obtained from the Dashboard UI.
+
+    ```
+    curl -s --location 'http://localhost:3000/api/portal/policies/search?q=Request%20Quota%20Policy' \
+      -H "Authorization: <dashboard-api-key>" \
+      -H "Accept: application/json" > policy.json
+
+    jq '.Data[0] | .quota_renewal_rate = 60' policy.json > updated_policy.json
+    jq -r '.Data[0]._id' policy.json > policy_id.txt
+
+    curl --location "http://localhost:3000/api/portal/policies/$(cat policy_id.txt)" \
+      -H "Authorization: <dashboard-api-key>" \
+      -H "Content-Type: application/json" \
+      -H "Accept: application/json" \
+      -X PUT \
+      -d @updated_policy.json
+    ```
+
+4. **Associate an Access Key with the Policy:**
+
+
+
+    1. Navigate to **API Security > Keys** in the Tyk Dashboard sidebar.
+    2. Click the **Add Key** button.
+    3. Under the **1. Access Rights** tab:
+        * In the **Apply Policy** section, select the `Request Quota Policy`.
+    4. Select the **2. Configuration** tab.
+    5. In the **Alias** field, enter `Request Quota Key`. This provides a human-readable identifier.
+    6. 
From the **Expires** dropdown, select `1 hour`. + 7. Click the **Create Key** button. + 8. A pop-up window **"Key created successfully"** will appear displaying the key details. **Copy the Key ID** value shown and save it securely. You will need this key to make API requests in the following steps. + 9. Click **OK** to close the pop-up. + + + + +#### Testing + +4. **Test Quota Exhaustion:** + + We've set a quota of 10 requests per 60 seconds. Let's send more than 10 requests within that window to observe the quota being enforced. + + 1. Open your terminal. + 2. Execute the following command, replacing `` with the API Key ID you saved earlier. This command attempts to send 15 requests sequentially. + + ```bash + for i in $(seq 1 15); do \ + echo -n "Request $i: "; \ + curl -s -o /dev/null -w "%{http_code}\n" -H "Authorization: " http://tyk-gateway.localhost:8080/request-quota-test/get; \ + sleep 0.1; \ + done + ``` + + *(Note: The `sleep 0.1` adds a tiny delay, but ensure all 15 requests execute well within the 60-second quota window).* + + 3. **Expected Observation:** You should see the first 10 requests succeed, returning an HTTP status code `200`. After the 10th request, the subsequent requests (11 through 15) should be blocked by the quota limit, returning an HTTP status code `403` (Forbidden). + + **Sample Output:** + + ```bash + Request 1: 200 + Request 2: 200 + Request 3: 200 + Request 4: 200 + Request 5: 200 + Request 6: 200 + Request 7: 200 + Request 8: 200 + Request 9: 200 + Request 10: 200 + Request 11: 403 + Request 12: 403 + Request 13: 403 + Request 14: 403 + Request 15: 403 + ``` + +5. **Test Quota Reset:** + + Now, let's wait for the quota period (60 seconds) to elapse and then send another request to verify that the quota allowance has been reset. + + 1. Wait slightly longer than the reset period in the same terminal. The command below waits for 70 seconds. + + ```bash + echo "Waiting for quota to reset (70 seconds)..." 
+ sleep 70 + echo "Wait complete. Sending one more request..." + ``` + + 2. Send one more request using the same API key: + + ```bash + curl -s -o /dev/null -w "%{http_code}\n" -H "Authorization: " http://tyk-gateway.localhost:8080/request-quota-test/get + ``` + + 3. **Expected Observation:** This request should now succeed, returning an HTTP status code `200`. This demonstrates that because the 60-second quota period ended, the *next* request made after that period triggered the quota reset, replenishing the allowance. + + **Sample Output:** + + ```bash + Waiting for quota to reset (70 seconds)... + Wait complete. Sending one more request... + 200 + ``` + +This quick start demonstrates the fundamental behaviour of Request Quotas: they limit the total number of requests allowed within a specific period and automatically reset the allowance once that period renews (triggered by the next request). + +--- +## Configuration Options + +Request Quotas in Tyk can be configured at various levels. + +The configuration involves setting two specific fields: + +1. **QuotaMax**: The maximum number of requests allowed during the quota period. + - Set to `-1` for unlimited requests + - Set to a positive integer (e.g., `1000`) to limit total requests + +2. **QuotaRenewalRate**: The time in seconds for which the quota applies. + - Example: `3600` for the hourly quota (1 hour = 3600 seconds) + - Example: `86400` for the daily quota (24 hours = 86400 seconds) + - Example: `2592000` for the monthly quota (30 days = 2592000 seconds) + +### System-Level Configuration + +Global quota settings are configured in the Tyk Gateway configuration file (`tyk.conf`). These settings affect how quotas are enforced across the entire gateway. 
+ + + + +```json +{ +// Partial config from tyk.conf + "enforce_org_quotas": true, + "enforce_org_data_detail_logging": false, + "monitor": { + "enable_trigger_monitors": true, + "global_trigger_limit": 80.0, + "monitor_user_keys": true, + "monitor_org_keys": true + }, +// ... more config follows +} +``` + +- `enforce_org_quotas`: When set to `true`, enables organization-level quota enforcement +- `monitor.enable_trigger_monitors`: Enables quota monitoring and webhook triggers +- `monitor.global_trigger_limit`: Percentage of quota usage that triggers alerts (e.g., 80.0 means 80%) +- `monitor.monitor_user_keys`: Enables monitoring for individual API keys +- `monitor.monitor_org_keys`: Enables monitoring for organization quotas + + + +```bash +export TYK_GW_ENFORCEORGQUOTAS=true +``` + + + +Refer to the [Tyk Gateway Configuration Reference](/tyk-oss-gateway/configuration#enforce_org_quotas) for more details on this setting. + +{/* Why we are commenting org quotas: Organization quotas are a hangover from the Classic Cloud, where all clients shared a deployment. They are not documented anywhere presently, and I’m not sure why we would start to do so - but if we’re going to, we need to be very careful not to add complexity to the way users configure things. */} + +{/* ### Organization-Level Configuration + +Organization quotas limit the total number of requests across all APIs for a specific organization. These are enforced by the `OrganizationMonitor` middleware when `enforce_org_quotas` is enabled. 
+ +- `quota_max`: Maximum number of requests allowed during the quota period +- `quota_renewal_rate`: Time in seconds for the quota period (e.g., 3600 for hourly quotas) + +Organization quotas are configured through the Tyk Dashboard API or Gateway API: + +```bash +curl -X POST -H "Authorization: {your-api-key}" \ + -H "Content-Type: application/json" \ + -d '{ + "quota_max": 1000, + "quota_renewal_rate": 3600, + }' \ + http://tyk-gateway:8080/tyk/org/keys/{org-id} +``` */} + +### API-Level Configuration + +You **cannot set** quota values within an API Definition, but you can **disable** quota checking entirely for all requests proxied through that specific API, regardless of Key or Policy settings. This is useful if an API should never have quota limits applied. + + + + + +In a Tyk OAS API Definition, you can globally disable quotas for specific APIs: + +- **skipQuota**: When set to true, disables quota enforcement for the API. +- **skipQuotaReset**: When set to true, prevents quota counters from being reset when creating or updating quotas. + +```json +{ + // Partial config from Tyk OAS API Definition + "middleware": { + "global": { + "skipQuota": true, + "skipQuotaReset": false + } + }, + // ... more config follows +} +``` + +Refer to the [Tyk OAS API Definition reference](/api-management/gateway-config-tyk-oas#global) for details. + + + + +In a Tyk Classic API Definition (JSON), set the `disable_quota` field to `true`. + +```json +{ + // Partial config from Tyk Classic API Definition + "disable_quota": true // Set to true to disable quota checks + // ... more config follows +} + +``` + +Refer to the [Tyk Classic API Definition reference](/api-management/gateway-config-tyk-classic) for details. + + + + + + + +### Configure via UI + +The Tyk Dashboard provides a straightforward interface to set request quota parameters on Security Policies and Access Keys. + + + + + +The image below shows a policy with request quotas. 
Any key using this policy will inherit the quota settings and behave as follows: each key will be permitted 1000 requests per 24-hour (86400 seconds) cycle before the quota resets. + +policy with request quota configured + +
+ +1. Navigate to **API Security > Policies** in the Tyk Dashboard sidebar +2. Click the **Add Policy** button +3. Under the **1. Access Rights** tab and in the **Add API Access Rule** section, select the required API +4. Scroll down to the **Global Limits and Quota** section (still under the **1. Access Rights** tab): + * Enable `Request Quotas` by setting the following values in the `Usage Quotas` section: + * Uncheck the `Unlimited Requests` checkbox + * Field **Requests (or connection attempts) per period** - Enter the total number of requests a client can use during the defined quota period. + * Field **Quota resets every:** - Select the duration of the quota period. +5. Select the **2. Configuration** tab +6. In the **Policy Name** field, enter a name +7. From the **Key expire after** dropdown, select an option +8. Click the **Create Policy** button + + +
+
+
+
+The image below shows an access key with request quotas. This access key behaves as follows: the key will be permitted 1000 requests per 24-hour (86400 seconds) cycle before the quota resets.
+
+**Note:** Direct key configuration overrides policy settings only for that specific key.
+
+access key with request quota configured
+
+ +1. Navigate to **API Security > Keys** in the Tyk Dashboard sidebar +2. Click the **Create Key** button +3. Under the **1. Access Rights** tab: + * Select **Choose API** + * In the **Add API Access Rule** section, select the required API +4. Scroll down to the **Global Limits and Quota** section (still under the **1. Access Rights** tab): + * Enable `Request Quotas` by setting the following values in the `Usage Quotas` section: + * Uncheck the `Unlimited Requests` checkbox + * Field **Requests (or connection attempts) per period** - Enter the total number of requests a client can use during the defined quota period. + * Field **Quota resets every:** - Select the duration of the quota period. +5. Select the **2. Configuration** tab +6. In the **Alias** field, enter a name. This human-readable identifier makes tracking and managing this specific access key easier in your analytics and logs. +7. From the **Expires** dropdown, select an option +8. Click the **Create Key** button + + +
+ +
+
+### Configure via API
+
+These are the fields that you can set directly in the Policy object or the Access Key:
+
+```json
+{
+ // Partial policy/session object fields
+ "quota_max": 1000, // Allow one thousand requests
+ "quota_renewal_rate": 86400, // 1 day or 24 hours
+ // ... more config follows
+}
+```
+
+
+
+
+
+To update the policy, do the following:
+1. Retrieve the policy object using `GET /api/portal/policies/{POLICY_ID}`
+2. Add or modify the `quota_max` and `quota_renewal_rate` fields within the policy JSON object
+3. Update the policy using `PUT /api/portal/policies/{POLICY_ID}` with the modified object, or create a new one using `POST /api/portal/policies/`
+
+**Explanation:**
+The above adds request quotas to a policy. Any key using this policy will inherit the quota settings and behave as follows: each key will be permitted 1000 requests per 24-hour (86400 seconds) cycle before the quota resets.
+
+
+
+
+
+**Note:** Direct key configuration overrides policy settings only for that specific key.
+
+To update the access key, do the following:
+1. Retrieve the key's session object using `GET /api/keys/{KEY_ID}`
+2. Add or modify the `quota_max` and `quota_renewal_rate` fields within the session object JSON
+3. Update the key using `PUT /api/keys/{KEY_ID}` with the modified session object
+
+**Explanation:**
+The above adds quotas to an access key. The key will be permitted 1000 requests per 24-hour (86400 seconds) cycle before the quota resets.
+
+
+
+
+
+### Important Considerations
+
+* **Policy Precedence:** Quotas set on a Security Policy apply to all keys using that policy *unless* overridden by a specific quota set directly on the key (using the "Set per API Limits and Quota" option).
+* **Unlimited Quota:** Setting `quota_max` to `-1` grants unlimited requests for the quota period.
+* **Event-Driven Resets:** Quotas reset *after* the `quota_renewal_rate` (in seconds) has passed *and* upon the next request using the key. They do not reset automatically on a fixed schedule (e.g., precisely at midnight or the 1st of the month) unless external automation updates the session object. +* **Response Headers:** When quotas are active, Tyk typically adds `X-RateLimit-Limit`, `X-RateLimit-Remaining`, and `X-RateLimit-Reset` headers to responses, allowing clients to track their usage. (Note: Header names might be configurable). + +--- +## How It Works + +Request Quotas in Tyk limit a client's total number of API requests within a defined period (hours, days, months). Unlike rate limits that control the frequency of requests over short intervals (like seconds or minutes) to prevent immediate system overload, Request Quotas control the total volume of requests allowed over much longer periods to manage overall consumption and align with service tiers. + +When clients reach their quota limit, further requests are rejected until the quota period renews. It helps API providers implement usage-based pricing tiers, prevent API abuse, control infrastructure costs, and ensure fair resource distribution among clients. + +Think of Request Quotas as a prepaid phone plan with a fixed number of minutes per month. When you sign up, you get allocated a specific number of call minutes (API requests) that you can use over the billing period. You can make calls (API requests) at any pace you want – all at once or spread throughout the month – but once you've used up your allocated minutes, you can't make any more calls until the next billing cycle begins. 
+ +```mermaid +flowchart LR + Client[API Client] -->|Makes Request| Gateway[Tyk Gateway] + Gateway -->|Check Quota| Redis[(Redis)] + Redis -->|Quota OK| Gateway + Redis -->|Quota Exceeded| Gateway + Gateway -->|If Quota OK| Upstream[Upstream API] + Gateway -->|If Quota Exceeded| Reject[Reject Request] + Upstream -->|Response| Gateway + Gateway -->|Response| Client +``` + +### How Tyk Implements Quotas + +Tyk implements request quotas using a Redis-based counter mechanism with time-based expiration. Here's a detailed breakdown of the implementation: + +```mermaid +graph LR + A[API Request Received] --> B(Check Redis Quota Counter); + B -- Counter < QuotaMax --> C{Increment Redis Counter}; + C --> D[Calculate quota_remaining = QuotaMax - Counter]; + D --> E[Update Session State]; + E --> F[Forward Request to Upstream]; + B -- Counter >= QuotaMax --> G[Reject Request with 403]; +``` + +#### Core Components + +1. **Redis Storage**: Quotas are tracked in Redis using incrementing counters for each API key. The TTL is set to the quota renewal period, and the counter is reset to 0 on the next request after expiration. + + Here is a sample Redis key for a Request Quota: + ``` + quota-[scope]-[key_hash] + ``` + + Where: + - `scope` is optional and represents an API-specific allowance scope + - `key_hash` is the hashed API key (if hash keys are enabled) + +2. **Session State**: Quota configuration is stored in the user's `SessionState`, which contains several quota-related fields: + + - `QuotaMax`: Maximum number of requests allowed during the quota period. + - `QuotaRemaining`: Number of requests remaining for the current period. **Note:** This is a derived value, not the primary counter. + - `QuotaRenews`: Unix timestamp when the quota will reset. + - `QuotaRenewalRate`: Time in seconds for the quota period (e.g., 3600 for hourly quotas). + +3. 
**Middleware**: The quota check is performed by the `RateLimitAndQuotaCheck` middleware + +#### Quota Enforcement + +The core logic for checking and enforcing Request Quotas is executed within the `RateLimitAndQuotaCheck` middleware, which is a step in the request processing pipeline. Here's a breakdown of this process: + +1. **Initiation:** As a request enters the Tyk Gateway, it passes through configured middleware. The quota validation process begins when it hits the `RateLimitAndQuotaCheck` middleware. + +2. **Applicability Check:** The middleware first determines if quota enforcement is relevant: + * It checks the API Definition to see if quotas are globally disabled. If so, the process stops here for quotas and the request proceeds. + * It identifies the API key for the request and retrieves its associated `SessionState`. + +3. **Retrieve Limits:** The middleware accesses the `SessionState` to get the specific quota parameters applicable to this key and potentially the specific API being accessed (if per-API quotas are configured): + * `QuotaMax`: The maximum number of requests allowed. + * `QuotaRenewalRate`: The duration (in seconds) of the quota period for setting the TTL in Redis. + +4. **Redis Interaction & Enforcement:** This is the core enforcement step, interacting directly with Redis: + * **Construct Key:** Generates the unique Redis key for tracking this specific quota counter (e.g., `quota-{scope}-{api-key-hash}`). + * **Check Expiry/Existence:** It checks Redis to see if the key exists and if its TTL is still valid. + * **Handle Renewal (If Expired/Missing):** If the key doesn't exist or its TTL has passed, Tyk initiates the renewal logic described previously (attempting a distributed lock, setting the counter to 0, and applying the `QuotaRenewalRate` as the new TTL). + * **Increment Counter:** Tyk atomically increments the Redis counter value. This operation returns the *new* value of the counter *after* the increment. 
+ * **Compare Against Limit:** The middleware compares this *new* counter value against the `QuotaMax` retrieved from the session state. + * **Decision:** + * If `new_counter_value <= QuotaMax`: The request is within the allowed quota. + * If `new_counter_value > QuotaMax`: This request has exceeded the quota limit. + +5. **Outcome:** + * **Quota OK:** The middleware allows the request to proceed to the next stage in the processing pipeline (e.g., other middleware, upstream service). + * **Quota Exceeded:** The middleware halts further request processing down the standard pipeline. It prepares and returns an error response to the client, typically `HTTP 403 Forbidden` with a "Quota exceeded" message. + +6. **Session State Update:** Regardless of whether the request was allowed or blocked due to the quota, the middleware calls an internal function (like `updateSessionQuota`) to update the in-memory `SessionState` associated with the API key. This update synchronizes the `QuotaRemaining` field in the session with the latest calculated state based on the Redis counter and its expiry. It ensures that subsequent operations within the same request lifecycle (if any) or diagnostic information have access to the most recent quota status. + +#### Quota Reset Mechanisms + +The available allowance (`QuotaRemaining`) for an API key is replenished back to its maximum (`QuotaMax`) through several distinct mechanisms: + +1. **Event-Driven Renewal (Primary Mechanism):** + * **Condition:** This occurs *after* the time duration specified by `QuotaRenewalRate` (in seconds) has elapsed since the quota period began (i.e., since the last reset or key creation/update). In Redis, this corresponds to the Time-To-Live (TTL) expiring on the quota tracking key. + * **Trigger:** The reset is **not** automatic based on a timer. It is triggered by the **next API request** made using that specific key *after* the `QuotaRenewalRate` duration has passed (and the Redis TTL has expired). 
+ * **Process:** Upon detecting the expired TTL during that next request, Tyk resets the Redis counter (typically by setting it to 0 and immediately incrementing it to 1 for the current request) and applies a *new* TTL based on the `QuotaRenewalRate`. This effectively makes the full `QuotaMax` available for the new period starting from that moment. + + ```mermaid + graph LR + A[Request After Quota Period] --> B{Redis Key Expired?}; + B -- Yes --> C[Reset Counter to 0]; + C --> D[Set New Expiration]; + D --> E[Process Request Normally]; + B -- No --> F[Continue Normal Processing]; + ``` + +2. **Manual Reset via API:** + * **Mechanism:** You can force an immediate quota reset for a specific API key by calling an endpoint on the Tyk Gateway Admin API. + * **Effect:** This action directly deletes the corresponding quota tracking key in Redis. The *next* request using this API key will find no existing key, triggering the renewal logic (Step 1) as if the period had just expired, immediately granting the full `QuotaMax` and setting a new TTL. This provides an immediate, on-demand refresh of the quota allowance. + +3. **Key Creation or Update:** + * **Trigger:** When a new API key is created or an existing key's configuration is updated (e.g., via the Dashboard or the Gateway API), Tyk reapplies the quota settings based on the current policy or key-specific configuration. + * **Process:** This typically involves setting the `QuotaRemaining` value to `QuotaMax` in the key's session data and ensuring the corresponding Redis key is created with the correct initial value (or implicitly reset) and its TTL set according to the `QuotaRenewalRate`. This ensures the key starts with a fresh quota allowance according to its defined limits. + * **Exception:** This behavior can be suppressed if the API definition includes the `DontSetQuotasOnCreate` field (referred to as `SkipQuotaReset` in the OAS specification), which prevents automatic quota resets during key creation or updates. 
+
+#### Key Technical Aspects
+
+1. **Time-Based Reset**: Unlike rate limiting, which uses sliding windows, quotas have a fixed renewal time determined by `QuotaRenewalRate` (in seconds).
+
+2. **Atomic Operations**: Redis pipelining is used to ensure atomic increment and expiration setting.
+
+3. **Race Condition Handling**: Distributed locks prevent multiple servers from simultaneously resetting quotas.
+
+4. **Quota Scope Support**: The implementation supports both global quotas and API-specific quotas through the scoping mechanism.
+
+---
+## FAQs
+
+
+
+Request Quotas in Tyk limit the total number of API requests a client can make within a specific time period. Unlike rate limits (which control requests per second), quotas control the total number of requests over longer periods like hours, days, or months. Once a quota is exhausted, further requests are rejected until the quota is renewed.
+
+
+
+While both control API usage, they serve different purposes:
+- **Rate Limits** control the frequency of requests (e.g., 10 requests per second) to prevent traffic spikes and ensure consistent performance
+- **Request Quotas** control the total volume of requests over a longer period (e.g., 10,000 requests per month) to manage overall API consumption and often align with business/pricing models
+
+
+
+Refer to this [documentation](#configuration-options).
+
+
+
+The main parameters for configuring quotas are:
+- `quota_max`: Maximum number of requests allowed during the quota period
+- `quota_remaining`: Number of requests remaining for the current period
+- `quota_renewal_rate`: Time in seconds during which the quota is valid (e.g., 3600 for hourly quotas)
+- `quota_renews`: Timestamp indicating when the quota will reset
+
+
+
+You can disable quotas for specific APIs by setting the `disable_quota` flag to `true` in the API definition. This config will bypass quota checking for all requests to that API, regardless of any quotas set at the key or policy level.
+ +Refer this [documentation](#api-level-configuration). + + + +When a quota is exceeded: +1. The request is rejected with a 403 Forbidden status code +2. A "QuotaExceeded" event is triggered (which can be used for notifications or monitoring) +3. The client must wait until the quota renewal period before making additional requests +4. The quota violation is logged and can be monitored in the Tyk Dashboard + + + +Tyk stores quota information in Redis: +- Quota keys are prefixed with "quota-" followed by the key identifier +- For each request, Tyk increments a counter in Redis and checks if it exceeds the quota_max +- When a quota period expires, the counter is reset +- For distributed deployments, quota information is synchronized across all Tyk nodes + + + +Yes, you can implement per-endpoint quotas using policies enabling the "per_api" partitioning. This config allows you to define different quota limits for API endpoints, giving you fine-grained control over resource usage. + + + +Organization quotas allow you to limit the total number of requests across all keys belonging to an organization. When enabled (using `enforce_org_quotas`), Tyk tracks the combined usage of all keys in the organization and rejects requests when the organization quota is exceeded, regardless of individual key quotas. + + + +Yes, Tyk provides quota monitoring capabilities: +- You can set up trigger monitors with percentage thresholds +- When usage reaches a threshold (e.g., 80% of quota), Tyk can trigger notifications +- These notifications can be sent via webhooks to external systems +- The monitoring configuration is set in the `monitor` section of your Tyk configuration + + + +Tyk's quota renewal is event-driven rather than time-driven. Quotas don't automatically reset at specific times (like midnight); instead, they reset when the first request is made after the renewal period has passed. 
If no requests are made after renewal, the quota counter remains unchanged until the next request triggers the check and renewal process. + + + +You can manually reset a quota for a specific key in two ways: + +**Via Tyk Dashboard:** +1. Navigate to the "Keys" section +2. Find and select the key you want to reset +3. Click on "Reset Quota" button + +**Via Tyk Gateway API:** +``` +POST /tyk/keys/reset/{key_id} +Authorization: {your-gateway-secret} +``` +This endpoint will immediately reset the quota for the specified key, allowing the key to make requests up to its quota limit again. + + + +Yes, by default, Tyk counts all requests against the quota regardless of the response status code (2xx, 4xx, 5xx). This means that even failed requests with error responses will decrement the available quota. This behavior is designed to prevent abuse and ensure consistent quota enforcement regardless of the upstream API's response. + + + +In multi-datacenter or multi-region setups, quota inconsistencies can occur due to: + +1. **Redis replication lag**: If you're using separate Redis instances with replication, there may be delays in syncing quota information +2. **Network latency**: In geographically distributed setups, network delays can cause temporary inconsistencies +3. **Configuration issues**: Each gateway must be properly configured to use the same Redis database for quota storage + +To resolve this, ensure all gateways are configured to use the same Redis database or a properly configured Redis cluster with minimal replication lag. Consider using Redis Enterprise or a similar solution with cross-region synchronization capabilities for multi-region deployments. + + + +In some older versions of Tyk, setting `quota_max` to -1 (to disable quotas) would generate an error log message: `Quota disabled: quota max <= 0`. This was a known issue that has been fixed in more recent versions. + +If you're still seeing these logs, consider: +1. 
Upgrading to the latest version of Tyk +2. Adjusting your log level to reduce noise +3. Using the API definition's `disable_quota` flag instead of setting `quota_max` to -1 + +This log message is informational and doesn't indicate a functional problem with your API. + + + +By default, Tyk counts all requests against the quota regardless of the response code. There is no built-in configuration to count only successful (2xx) responses toward quota limits. + +If you need this functionality, you have two options: +1. Implement a custom middleware plugin that conditionally decrements the quota based on response codes +2. Use the Tyk Pump to track successful vs. failed requests separately in your analytics platform and implement quota management at the application level + + + +If you modify a quota configuration mid-period (before the renewal time): + +1. For **increasing** the quota: The new maximum will apply, but the current remaining count stays the same +2. For **decreasing** the quota: If the new quota is less than what's already been used, further requests will be rejected +3. For **changing the renewal rate**: The new renewal period will apply from the next renewal + +Changes to quota settings take effect immediately, but don't reset the current usage counter. Use the "Reset Quota" functionality to apply new settings and reset the counter immediately. + + + +Yes, Tyk provides several ways to implement different quota plans: + +1. **Policies**: Create different policies with varying quota limits and assign them to keys based on subscription level +2. **Key-specific settings**: Override policy quotas for individual keys when necessary +3. **Meta Data**: Use key metadata to adjust quota behavior through middleware dynamically +4. **Multiple APIs**: Create separate API definitions with different quota configurations for different service tiers + +This flexibility allows you to implement complex quota schemes that align with your business model and customer tiers. 
+ + + +When troubleshooting quota issues: + +1. **Check Redis**: Ensure Redis is functioning properly and examine the quota keys directly +2. **Review logs**: Look for quota-related messages in the Tyk Gateway logs +3. **Verify configuration**: Confirm that quota settings are correctly configured in policies and API definitions +4. **Test with the API**: Use the Tyk Gateway API to check quota status for specific keys +5. **Monitor request headers**: Examine the `X-Rate-Limit-Remaining` headers in API responses + +For multi-gateway setups, verify that all gateways use the same Redis instance and that there are no synchronization issues between Redis clusters. + + + diff --git a/api-management/request-throttling.mdx b/api-management/request-throttling.mdx new file mode 100644 index 000000000..63fbbc6b7 --- /dev/null +++ b/api-management/request-throttling.mdx @@ -0,0 +1,437 @@ +--- +title: "Request Throttling" +description: "What is Request Throttling in Tyk Gateway?" +keywords: "Request Throttling" +sidebarTitle: "Request Throttling" +--- + +## Introduction + +Tyk's Request Throttling feature provides a mechanism to manage traffic spikes by queuing and automatically retrying client requests that exceed [rate limits](/api-management/rate-limit), rather than immediately rejecting them. This helps protect upstream services from sudden bursts and improves the resilience of API interactions during temporary congestion. + +--- +## Quick Start + +### Overview + +In this tutorial, we will configure Request Throttling on a Tyk Security Policy to protect a backend service from sudden traffic spikes. We'll start by defining a basic rate limit on a policy, then enable throttling with specific retry settings to handle bursts exceeding that limit, associate a key with the policy, and finally test the behaviour using simulated traffic. This guide primarily uses the Tyk Dashboard for configuration. 
+ +### Prerequisites + +- **Working Tyk Environment:** You need access to a running Tyk instance that includes both the Tyk Gateway and Tyk Dashboard components. For setup instructions using Docker, please refer to the [Tyk Quick Start](https://github.com/TykTechnologies/tyk-pro-docker-demo?tab=readme-ov-file#quick-start). +- **Curl, seq and xargs**: These tools will be used for testing. + +### Instructions + +#### Create an API + +1. **Create an API:** + 1. Log in to your Tyk Dashboard. + 2. Navigate to **API Management > APIs** + 3. Click **Add New API** + 4. Click **Import** + 5. Select **Import Type** as **Tyk API** + 6. Copy the below Tyk OAS definition in the text box and click **Import API** to create an API + + + + + ```json + { + "components": { + "securitySchemes": { + "authToken": { + "in": "header", + "name": "Authorization", + "type": "apiKey" + } + } + }, + "info": { + "title": "Request Throttling Test", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "security": [ + { + "authToken": [] + } + ], + "servers": [ + { + "url": "http://tyk-gateway.localhost:8080/request-throttling-test/" + } + ], + "x-tyk-api-gateway": { + "info": { + "name": "Request Throttling Test", + "state": { + "active": true + } + }, + "middleware": { + "global": { + "contextVariables": { + "enabled": true + }, + "trafficLogs": { + "enabled": true + } + } + }, + "server": { + "authentication": { + "enabled": true, + "securitySchemes": { + "authToken": { + "enabled": true + } + } + }, + "listenPath": { + "strip": true, + "value": "/request-throttling-test/" + } + }, + "upstream": { + "url": "http://httpbin.org/" + } + } + } + ``` + + + + +#### Configure Policy and Rate Limit + +2. **Create and Configure an Security Policy with Rate Limiting:** + + + + + 1. Navigate to **API Security > Policies** in the Tyk Dashboard sidebar + 2. Click the **Add Policy** button + 3. Under the **1. 
Access Rights** tab, in the **Add API Access Rule** section, select the `Request Throttling Test` API + 4. Scroll down to the **Global Limits and Quota** section (still under the **1. Access Rights** tab): + * Set the following values for `Rate Limiting` + * Enter `5` into the **Requests (or connection attempts)** field + * Enter `10` into the **Per (seconds):** field + 5. Select the **2. Configuration** tab + 6. In the **Policy Name** field, enter `Request Throttling Policy` + 7. From the **Key expire after** dropdown, select `1 hour` + 8. Click the **Create Policy** button + + + + + policy with throttling configured + +3. **Associate an Access Key with the Policy:** + + + + + 1. Navigate to **API Security > Keys** in the Tyk Dashboard sidebar + 2. Click the **Add Key** button + 3. Under the **1. Access Rights** tab: + * In the **Apply Policy** section, select the `Request Throttling Policy` + 4. Select the **2. Configuration** tab + 5. In the **Alias** field, enter `Request Throttling Key`. This provides a human-readable identifier that makes tracking and managing this specific access key easier in your analytics and logs. + 6. From the **Expires** dropdown, select `1 hour` + 7. Click the **Create Key** button + 8. A pop-up window **"Key created successfully"** will appear displaying the key details. **Copy the Key ID** value shown and save it securely. You will need this key to make API requests in the following steps + 9. Click **OK** to close the pop-up + + + + +4. **Test Rate Limit** + + So far, we've created a policy for an API definition and created a key that complies with that policy. Before enabling throttling, let's observe the standard rate limiting behaviour. We'll send 10 requests in parallel using `xargs` to simulate a burst that exceeds our configured limit (5 requests per 10 seconds). + + 1. Open your terminal. + 2. 
Execute the following command, replacing `` with the API Key ID you saved earlier: + + ```bash + seq 10 | xargs -n1 -P10 -I {} bash -c 'curl -s -I -H "Authorization: " http://tyk-gateway.localhost:8080/request-throttling-test/ | head -n 1' + ``` + + 3. **Expected Observation:** You should see some requests succeed with `HTTP/1.1 200 OK`, and other requests failing with `HTTP/1.1 429 Too Many Requests` as the rate limit is immediately enforced. The order of `200s` vs `429s` might vary depending upon the processing time, but you will see immediate rejections once the limit is hit. + + **Sample Output (Illustrative):** + + ```bash + HTTP/1.1 429 Too Many Requests + HTTP/1.1 429 Too Many Requests + HTTP/1.1 429 Too Many Requests + HTTP/1.1 429 Too Many Requests + HTTP/1.1 429 Too Many Requests + HTTP/1.1 200 OK + HTTP/1.1 200 OK + HTTP/1.1 200 OK + HTTP/1.1 200 OK + HTTP/1.1 200 OK + ``` + +#### Configure Throttling + +Now that the policy enforces a basic rate limit, we will enable and configure Request Throttling. This adds the queue-and-retry behavior for requests that exceed the limit, preventing immediate rejection and helping to smooth out traffic spikes. + +5. **Configure Request Throttling by Updating the Security Policy** + + 1. Navigate to **API Security > Policies** in the Tyk Dashboard sidebar + 2. Click on the `Request Throttling Policy` + 3. Under the **1. Access Rights** tab: + * In the **Global Limits and Quota** section + * Set the following values for `Throttling` + * Uncheck the `Disable Throttling` checkbox + * Enter `3` into the **Throttle retries (or connection attempts)** field + * Enter `5` into the **Per (seconds):** field + 4. Click the **Update** button + 5. A pop-up window will appear to confirm the changes. Click **Update** to close the pop-up + +#### Testing + +6. **Test Request Throttling** + + 1. 
**Repeat the Test:** Open your terminal and execute the *exact same command* as in step 4: + + ```bash + seq 10 | xargs -n1 -P10 -I {} bash -c 'curl -s -I -H "Authorization: " http://tyk-gateway.localhost:8080/request-throttling-test/ | head -n 1' + ``` + + 2. **Expected Observation:** + * You will still see the first ~5 requests return `HTTP/1.1 200 OK` quickly + * Critically, the subsequent requests (6 through 10) will **not** immediately return `429`. Instead, you should observe a **delay** before their status lines appear + * After the delay (`throttle_interval`), Tyk will retry the queued requests. Some might now succeed (return `200 OK`) if the rate limit window allows + * If a request is retried `throttle_retry_limit` (3) times and still encounters the rate limit, *then* it will finally return `HTTP/1.1 429 Too Many Requests` + * Overall, you might see more `200 OK` responses compared to the previous test, and any `429` responses will appear significantly later + + **Sample Output (Illustrative - timing is key):** + + ```bash + HTTP/1.1 200 OK # Appears quickly + HTTP/1.1 200 OK # Appears quickly + HTTP/1.1 200 OK # Appears quickly + HTTP/1.1 200 OK # Appears quickly + HTTP/1.1 200 OK # Appears quickly + # --- Noticeable pause here --- + HTTP/1.1 200 OK + # --- Noticeable pause here --- + HTTP/1.1 200 OK + # --- Noticeable pause here --- + HTTP/1.1 200 OK + HTTP/1.1 200 OK + HTTP/1.1 200 OK + ``` + *(The exact mix of 200s and 429s on the delayed requests depends heavily on timing relative to the 10-second rate limit window reset and the retry attempts).* + +This comparison clearly shows how Request Throttling changes the behaviour from immediate rejection to queued retries, smoothing the traffic flow and potentially allowing more requests to succeed during bursts. 
+ +--- +## Configuration Options + +Request Throttling is configured within Tyk [Security Policies](/api-management/policies) or directly on individual [Access Keys](/api-management/authentication/bearer-token). + +The configuration involves setting two specific fields: + +- `throttle_interval`: Defines the wait time (in seconds) between retry attempts for a queued request. (*Note*: Do not set it to `0`. If you do, no delay is applied, and the request is immediately retried. This will create a β€œbusy waiting” scenario that consumes more resources than a positive interval value) +- `throttle_retry_limit`: Sets the maximum number of retry attempts before the request is rejected. (*Note*: Do not set it to `0`. Setting it to `0` means that there will be no throttling on the request) + +To enable throttling, both fields must be set to a value greater than `0`. + +### Disable throttling + +The default value is `-1` and means it is disabled by default. +Set `throttle_interval` and `throttle_retry_limit` to any number smaller than `0` to ensure the feature is disabled. + +You can configure these settings using either the Tyk Dashboard UI or the Tyk Dashboard API. + +### Configure via UI + +The Tyk Dashboard provides a straightforward interface to set throttling parameters on both Security Policies and Access Keys. + + + + + +The image below shows a policy with throttling. Any key using this policy will inherit the throttling settings and behaves as follows: wait 2 seconds between retries for queued requests, attempting up to 3 times before failing (so overall 6 seconds before getting another 429 error response). + +policy with throttling configured + +
+ +1. Navigate to **API Security > Policies** in the Tyk Dashboard sidebar +2. Click the **Add Policy** button +3. Under the **1. Access Rights** tab and in the **Add API Access Rule** section, select the required API +4. Scroll down to the **Global Limits and Quota** section (still under the **1. Access Rights** tab): + * To enable *Throttling*, we must configure *Rate Limiting* in the policy. + * Field **Requests (or connection attempts)** - Enter the number of requests you want to allow before rate limit is applied. + * Field **Per (seconds):** - Enter the time window in seconds during which the number of requests specified above is allowed. + * Now enable `Throttling` by setting the following values in the `Throttling` section: + * Uncheck the `Disable Throttling` checkbox + * Field **Throttle retries (or connection attempts)** - Enter the maximum number of times Tyk should attempt to retry a request after it has been queued due to exceeding a rate limit or quota. + * Field **Per (seconds):** - Enter the time interval in seconds Tyk should wait between each retry attempt for a queued request. +5. Select the **2. Configuration** tab +6. In the **Policy Name** field, enter a name +7. From the **Key expire after** dropdown, select an option +8. Click the **Create Policy** button + + +
+ + + +The image below shows an access key with throttling. This access key behaves as follows: wait 2 seconds between retries for queued requests, attempting up to 3 times before failing (so overall 6 seconds before getting another 429 error response). + +**Note:** Direct key configuration overrides policy settings only for that specific key. + +access key with throttling configured + +
+ +1. Navigate to **API Security > Keys** in the Tyk Dashboard sidebar +2. Click the **Create Key** button +3. Under the **1. Access Rights** tab: + * Select **Choose API** + * In the **Add API Access Rule** section, select the required API +4. Scroll down to the **Global Limits and Quota** section (still under the **1. Access Rights** tab): + * To enable *Throttling*, we must configure *Rate Limiting* in the Access Key. + * Field **Requests (or connection attempts)** - Enter the number of requests you want to allow before rate limit is applied. + * Field **Per (seconds):** - Enter the time window in seconds during which the number of requests specified above is allowed. + * Now enable `Throttling` by setting the following values in the `Throttling` section: + * Uncheck the `Disable Throttling` checkbox + * Field **Throttle retries (or connection attempts)** - Enter the maximum number of times Tyk should attempt to retry a request after it has been queued due to exceeding a rate limit or quota. + * Field **Per (seconds):** - Enter the time interval in seconds Tyk should wait between each retry attempt for a queued request. +5. Select the **2. Configuration** tab +6. In the **Alias** field, enter a name. This provides a human-readable identifier that makes tracking and managing this specific access key easier in your analytics and logs. +7. From the **Expires** dropdown, select an option +8. Click the **Create Key** button + + +
+ +
 + +### Configure via API + +These are the fields that you can set directly in the Policy object or the Access Key: + +```json +{ + // Partial policy/session object fields + "throttle_interval": 2, // Wait 2 seconds between retries + "throttle_retry_limit": 3, // Attempt a maximum of 3 retries + // ... more config follows +} +``` + + + + + +To update the policy, do the following: +1. Retrieve the policy object using `GET /api/portal/policies/{POLICY_ID}` +2. Add or modify the `throttle_interval` and `throttle_retry_limit` fields within the policy JSON object +3. Update the policy using `PUT /api/portal/policies/{POLICY_ID}` with the modified object, or create a new one using `POST /api/portal/policies/` + +**Explanation:** +The above adds throttling to a policy. Any key using this policy will inherit the throttling settings and behaves as follows: wait 2 seconds between retries for queued requests, attempting up to 3 times before failing (so overall 6 seconds before getting another 429 error response). + + + + + +Note: Direct key configuration overrides policy settings only for that specific key. + +To update the access key do the following: +1. Retrieve the key's session object using `GET /api/keys/{KEY_ID}` +2. Add or modify the `throttle_interval` and `throttle_retry_limit` fields within the session object JSON +3. Update the key using `PUT /api/keys/{KEY_ID}` with the modified session object + + +**Explanation:** +The above adds throttling to a key. Any request made by the key will behave as follows: wait 2 seconds between retries for queued requests, attempting up to 3 times before failing (so overall 6 seconds before getting another 429 error response). 
 + + + + + +--- +## How It Works + +```mermaid +flowchart LR + A[Client Request] --> GW(Tyk Gateway); + + subgraph Rate Limits + GW --> RL{Rate Limit OK?}; + RL -- Yes --> Q{Quota OK?}; + RL -- No --> T{Throttle Enabled?}; + Q -- Yes --> Fwd[Forward Request]; + Q -- No --> Reject[Reject Request]; + end + + subgraph Throttling Logic + T -- No --> Reject; + T -- Yes --> Queue[Queue Request]; + Queue --> Wait[Wait ThrottleInterval]; + Wait --> RetryL{Retry Limit Reached?}; + RetryL -- Yes --> Reject; + RetryL -- No --> Recheck(Re-evaluate Rate Limit Only); + %% Loop back to rate limit check only %% + Recheck --> RL; + end + + Fwd --> Backend((Upstream Service)); + Backend --> Success((Success Response)); + Success --> Client; + Reject --> Failure((Failure Response)); + Failure --> Client; +``` + +Tyk's Request Throttling intercepts API requests *after* they have exceeded a configured [Rate Limit](/api-management/rate-limit). + +Instead of immediately rejecting these requests with a `429 Too Many Requests` error (which is the default rate-limiting behaviour), the Gateway temporarily holds them in a queue. After waiting for a specified duration (`throttle_interval`), Tyk attempts to process the request again, re-checking the rate limit status. + +This retry cycle repeats until either the request can be successfully processed (if capacity becomes available) or a configured maximum number of retries (`throttle_retry_limit`) is reached. Only after exhausting all retries does Tyk return the `429` error to the client. + +Think of it like trying to access a service with a restriction on how many people can enter per minute (Rate Limit). If you arrive when the per-minute limit is full, standard behaviour is to turn you away immediately. 
With Throttling enabled, the service instead asks you to wait briefly (the interval) and tries your entry again shortly, checking if the rate limit has freed up capacity, repeating this a few times (the retry limit) before finally turning you away if access is still restricted. + +--- +## FAQ + + + + +Request Throttling in Tyk is a mechanism that allows for graceful handling of rate limit violations. Instead of immediately rejecting requests that exceed rate limits, throttling gives clients a chance to retry after a specified delay. + + + +Rate Limiting is a mechanism to restrict the number of requests a client can make in a given time period (e.g., 100 requests per minute). Request Throttling is an extension of rate limiting that provides a retry mechanism when rate limits are exceeded. Instead of immediately failing with a 429 status code, throttling allows the gateway to wait and retry the request internally. + + + +No, Request Throttling in Tyk is exclusively linked to rate limits and does not work with request quotas. When a quota is exceeded, the request is immediately rejected without any throttling or retry attempts. Throttling is only applied when rate limits are exceeded. + + + +Refer to this [documentation](#configuration-options). + + + +Request Throttling can increase response times for requests that exceed rate limits, as the gateway will wait for the specified `ThrottleInterval` between retry attempts. The maximum additional latency would be `ThrottleInterval Γ— ThrottleRetryLimit` seconds. This trade-off provides better success rates at the cost of potentially longer response times for some requests. + + + +Yes, Tyk tracks throttled requests in its health check metrics. You can monitor the `ThrottledRequestsPS` (throttled requests per second) metric to see how often requests are being throttled. Additionally, when a request is throttled, Tyk emits a `RateLimitExceeded` event that can be captured in your monitoring system. 
+ + + +No, Request Throttling is not enabled by default. To enable throttling, you need to explicitly set `ThrottleRetryLimit` to a value greater than 0 and configure an appropriate `ThrottleInterval`. These settings can be applied through policies or directly in access keys. + + \ No newline at end of file diff --git a/api-management/response-caching.mdx b/api-management/response-caching.mdx new file mode 100644 index 000000000..383ae8f8e --- /dev/null +++ b/api-management/response-caching.mdx @@ -0,0 +1,754 @@ +--- +title: "Caching Responses" +description: "How to manage users, teams, permissions, rbac in Tyk Dashboard" +keywords: "Caching, Request Optimization, Optimization, Endpoint Caching, Configuration, Cache" +sidebarTitle: "Response Caching" +--- + +## Overview + +The Tyk Gateway can cache responses from your upstream services. + +API Clients which make subsequent requests to a cached endpoint will receive the cached response directly from the Gateway, which: + - reduces load on the upstream service + - provides a quicker response to the API Client (reduces latency) + - reduces concurrent load on the API Gateway + +Caching is best used on endpoints where responses infrequently change and are computationally expensive for the upstream service to generate. + +### Caching with Tyk + +Tyk uses Redis to store the cached responses and, as you'd expect from Tyk, there is lots of flexibility in how you configure caching so that you can optimize the performance of your system. + +There are two approaches to configure caching for an API deployed with Tyk: + + - [Basic](/api-management/response-caching#basic-caching) or [Safe Request](/api-management/response-caching#global-cache-safe-requests) caching is applied at the API level for all requests for which it is safe to do so. + - [Advanced](/api-management/response-caching#endpoint-caching) caching options can be applied at the endpoint level. 
+ +Tyk's advanced caching options allow you to selectively cache the responses to all requests, only those from specific paths or only responses with specific status codes returned by the API. You can even cache dynamically based upon instruction from the upstream service received within the response. + +Caching is enabled by default at the Gateway level, but no caching will happen until the API Definition is configured to do so. + +### Cache Terminology and Features + +#### Cache Key +Cache keys are used to differentiate cached responses, such that slight variations in the request can generate different cache keys. This enables you to configure the cache so that different API Clients receive different cached responses when accessing the same API endpoint. + +This makes for a very granular cache, which may result in duplication of cached responses. This is preferable to the cache not being granular enough and therefore rendering it unsuitable for use, such that two API Clients receive the same cached response when this is not desired. + +The cache key is calculated using many factors: + - request HTTP method + - request URL (API path/endpoint) + - keys and values of any headers specified by `cache_by_headers` property + - hash of the request body + - API Id of the requested API + - value of the authorization header, if present, or if not, the API Client IP address + +#### Cache Value +The value stored in the cache is a base64 encoded string of the response body. When a subsequent request matches the cache key (a **cache hit**), Tyk decodes the cache value and returns this to the API Client that made the request. + +#### Indicating a Cached Response +When a request causes a cache hit, the Gateway will add a special header to indicate that the response being received is from a cache: + - `X-Tyk-Cached-Response` is added to the response header with the value `1` + +The API Client can use this to identify cached responses from non-cached responses. 
+ +#### Global Cache (Safe Requests) +We define a safe request as any category of API request that is considered cacheable without causing any undesired side effects or security concerns. These are requests made using the HTTP methods `GET`, `HEAD` or `OPTIONS` that do not modify data and can be safely cached for performance gains (i.e. they should be idempotent and so are good candidates for caching). If these methods are not idempotent for your API, then you should not use safe request caching. + +Safe request caching at the API level is enabled by setting the `cache_all_safe_requests` option to `true`, or by checking the equivalent checkbox in the Dashboard UI. This will enable safe request caching on all endpoints for an API. + +This mode of operation is referred to as Global Caching because it is applied globally within the scope of a single API. Picking this approach will override any per-endpoint (per-path) caching configuration, so it’s not suitable if granular control is required. + +Tyk does support safe request caching at the more granular, per-endpoint level, as described [here](/api-management/response-caching#request-selective-cache-control) - but `cache_all_safe_requests` must be set to `false` in that scenario. + +#### Cache Timeout +The cache timeout (Time-To-Live or TTL) value can be configured per API and is the maximum age for which Tyk will consider a cache entry to be valid. You should use this to optimize the tradeoff between reducing calls to your upstream service and potential for changes to the upstream data. + +If the timeout has been exceeded when a request is made to a cached API, that request will be passed to the upstream and the response will (if appropriate) be used to refresh the cache. + +The timeout is configured in seconds. + +#### Cache Response Codes +You can configure Tyk to cache only responses with certain HTTP status codes (e.g. 200 OK), for example to save caching error responses. 
You can configure multiple status codes that will be cached for an API, but note that this applies only to APIs that return with an HTTP status code in the response. + +#### Dynamic Caching +By default Tyk maintains its response cache with a separate entry for each combination of API key (if authorization is enabled), request method and request path. Dynamic caching is a more flexible method of caching API responses based on header or body content rather than just the request method and path. This allows for more granular caching control and maintenance of separate caches for different users or request properties. + +#### Upstream Cache Control +Upstream cache control refers to caching API responses based on instructions provided by the upstream service within the response headers. This allows the upstream service to have more control over which responses are cached and for how long. + +## Basic Caching + +_On this page we describe the use of Tyk's API response cache at the API level (Global); for details on the more advanced Endpoint level cache you should refer to [this](/api-management/response-caching#endpoint-caching) page._ + +Caching is configured separately for each API according to values you set within the API definition. Subsequently, the caching scope is restricted to an API definition, rather than being applied across the portfolio of APIs deployed in the Gateway. + +If you are using the Tyk Dashboard you can set these options from the Dashboard UI, otherwise you will need to edit the raw API definition. + +### Configuring Tyk's API-level cache +Within the API Definition, the cache controls are grouped within the `cache_options` section. 
+ +The main configuration options are: + - `enable_cache`: Set to `true` to enable caching for the API + - `cache_timeout`: Number of seconds to cache a response for, after which the next new response will be cached + - `cache_response_codes`: The HTTP status codes a response must have in order to be cached + - `cache_all_safe_requests`: Set to `true` to apply the caching rules to all requests using `GET`, `HEAD` and `OPTIONS` HTTP methods + +For more advanced use of the API-level cache we also have: + - `cache_by_headers`: used to create multiple cache entries based on the value of a [header value](#selective-caching-by-header-value) of your choice + - `enable_upstream_cache`: used to allow your [upstream service](/api-management/response-caching#upstream-cache-control-1) to identify the responses to be cached + - `cache_control_ttl_headers`: used with `enable_upstream_cache` + +#### An example of basic caching +To enable global caching for all safe requests to an API, only storing HTTP 200 responses, with a 10 second time-to-live (TTL), you would set: +``` +"cache_options": { + "enable_cache": true, + "cache_timeout": 10, + "cache_all_safe_requests": true, + "cache_response_codes": [200] +} +``` + + + +If you set `cache_all_safe_requests` to true, then the cache will be global and *all* inbound requests made to the API will be evaluated by the caching middleware. This is great for simple APIs, but for most, a finer-grained control is required. This control will over-ride any per-endpoint cache configuration. + + + +#### Selective caching by header value +To create a separate cache entry for each response that has a different value in a specific HTTP header you would configure the `cache_option.cache_by_headers` option with a list of the headers to be cached. 
For example, to cache each value in the custom `Unique-User-Id` header of your API response separately you would set: +``` + "cache_options": { + "cache_by_headers": ["Unique-User-Id"] +} +``` + + + +The `cache_by_headers` configuration is not currently exposed in the Dashboard UI, so it must be enabled through either the raw API editor or the Dashboard API. + + + +### Configuring the Cache via the Dashboard +Follow these simple steps to enable and configure basic API caching via the Dashboard. + +**Steps for Configuration:** + +1. **Go to the Advanced Options** + + From the API Designer, select the **Advanced Options** tab: + + Advanced options tab location + +2. **Set the Cache Options for the Global Cache** + + Cache settings + + Here you must set: + + 1. **Enable caching** to enable the cache middleware + 2. **Cache timeout** to set the [TTL](/api-management/response-caching#cache-timeout) (in seconds) for cached requests + 3. **Cache only these status codes** to set which [response codes](/api-management/response-caching#cache-response-codes) to cache (ensure that you click **ADD** after entering each response code so that it is added to the list) + 4. **Cache all safe requests** to enable the [global cache](/api-management/response-caching#global-cache-safe-requests) + +## Endpoint Caching + +### Overview + +On this page we describe how to configure Tyk's API response cache per endpoint within an API. This gives granular control over which paths are cached and allows you to vary cache configuration across API versions. For details on the API level (Global) cache you should refer to the [global-cache](/api-management/response-caching#basic-caching) configuration page. + +When you use the API-level cache, Tyk will maintain a cache entry for each combination of request method, request path (endpoint) and API key (if authentication is enabled) for an API. 
The Endpoint Caching middleware gives you granular control over which paths are cached and allows you to vary cache configuration across API versions. + +For details on the API-level cache you should refer to the [API-level cache](/api-management/response-caching#basic-caching) configuration page. + +#### When to use the Endpoint Caching middleware + +##### API with multiple endpoints +When your API has more than one endpoint the upstream data could have different degrees of freshness, for example the data returned by one endpoint might refresh only once every five minutes (and so should be suitably cached) whilst another might give real-time data and so should not be cached. The endpoint cache allows you to optimize the caching of each endpoint to meet your requirements. + +##### Request based caching +If you have an API that's providing search capability (for example into a catalog of products) and want to optimize the performance for the most frequently requested search terms, you could use the endpoint cache's [request-selective](#request-selective-cache-control) capability to cache only a subset of all requests to an endpoint. + +#### How the endpoint cache works +If caching is enabled then, by default, Tyk will create separate cache entries for every endpoint (path) of your API. This may be unnecessary for your particular API, so Tyk provides a facility to cache only specific endpoint(s). + +The endpoint-level cache relies upon the API-level cache being enabled but then allows you to enable the middleware for the specific endpoints that you wish to cache. No other endpoint requests will be cached. + +For each endpoint in your API with endpoint caching middleware enabled, you can configure which response codes should be cached (for example, you might not want to cache error responses) and also the refresh interval - or timeout - for the cache entries. 
+ + + +It's important to note that the [cache all safe requests](/api-management/response-caching#global-cache-safe-requests) feature of the API-level cache will overrule the per-endpoint configuration so you must ensure that both are not enabled for the same API. + + + +##### Request-selective cache control +For ultimate control over what Tyk caches, you can optionally configure the endpoint cache middleware to look for specific content in the request body. Tyk will then create a separate cache entry for each response where the request matches the specific combination of method, path and body content. + +You define a regex pattern and, if Tyk finds a match for this anywhere in the request body, the response will be cached. + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + ## Internal Endpoint middleware summary + - The Endpoint Cache middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Endpoint Cache middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +### Using Tyk OAS API + +The [Endpoint Caching](/api-management/response-caching#endpoint-caching) middleware allows you to perform selective caching for specific endpoints rather than for the entire API, giving you granular control over which paths are cached. + +When working with Tyk OAS APIs the middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/response-caching#using-classic-api) page. + +#### Configuring the middleware in the Tyk OAS API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +**Configuring the endpoint cache is performed in two parts:** + +1. **Enable Tyk's caching function** + + The caching function is enabled by adding the `cache` object to the `global` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API. 
+ + This object has the following configuration: + - `enabled`: enable the cache for the API + - `timeout`: set as the default cache refresh period for any endpoints for which you don't want to configure individual timeouts (in seconds) + +2. **Enable and configure the middleware for the specific endpoint** + + The endpoint caching middleware (`cache`) should then be added to the `operations` section of `x-tyk-api-gateway` for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + + The `cache` object has the following configuration: + - `enabled`: enable the middleware for the endpoint + - `timeout`: set to the refresh period for the cache (in seconds) + - `cacheResponseCodes`: HTTP responses codes to be cached (for example `200`) + - `cacheByRegex`: Pattern match for [selective caching by body value](/api-management/response-caching#request-selective-cache-control) + + For example: + ```json {hl_lines=["37-40", "45-51"],linenos=true, linenostart=1} + { + "components": {}, + "info": { + "title": "example-endpoint-cache", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/delay/5": { + "post": { + "operationId": "delay/5post", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-endpoint-cache", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-endpoint-cache/", + "strip": true + } + }, + "global": { + "cache": { + "enabled": true, + "timeout": 60 + } + }, + "middleware": { + "operations": { + "delay/5post": { + "cache": { + "enabled": true, + "cacheResponseCodes": [ + 200 + ], + "timeout": 5 + } + } + } + } + } + } + ``` + + In this example the endpoint cache middleware has been configured to cache `HTTP 200` responses to requests to the `POST /delay/5` endpoint. The cache will refresh after 5 seconds. 
Note that requests to other endpoints will also be cached, with a default cache timeout of 60 seconds according to the configuration in lines 37-40. + + The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the endpoint caching. + +#### Configuring the middleware in the API Designer + +Adding endpoint caching to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Endpoint Cache middleware** + + Select **ADD MIDDLEWARE** and choose the **Cache** middleware from the *Add Middleware* screen. + + Adding the Endpoint Cache middleware + +3. **Configure the middleware** + + Set the timeout and HTTP response codes for the endpoint. You can remove a response code from the list by clicking on the `x` next to it. + + Configuring the endpoint cache middleware for a Tyk OAS API + + + + + Body value match or [request selective](/api-management/response-caching#request-selective-cache-control) caching is not currently exposed in the Dashboard UI, so it must be enabled though either the raw API editor or the Dashboard API. + + + + Select **UPDATE MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +### Using Classic API + +The [Endpoint Caching](/api-management/response-caching#endpoint-caching) middleware allows you to perform selective caching for specific endpoints rather than for the entire API, giving you granular control over which paths are cached. + +When working with Tyk Classic APIs the middleware is configured in the Tyk Classic API Definition. 
You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](/api-management/response-caching#using-tyk-oas-api) page. + +If using Tyk Operator please refer to section [configuring the middleware in the Tyk Operator](#tyk-operator). + +#### Configuring the middleware in the Tyk Classic API Definition + +When using the Tyk Classic API Definition, there are two options for endpoint caching - simple and advanced. + +The [simple](#simple-endpoint-cache) option works with the API-level cache and allows you to select which endpoints are cached, but relies upon the cache timeout (refresh) configured at the API-level. It will cache all responses received from the endpoint regardless of the HTTP response code for all [safe requests](/api-management/response-caching#global-cache-safe-requests). + +The [advanced](#advanced-endpoint-cache) option allows you to cache more selectively, giving control over the HTTP response codes to be cached, a per-endpoint cache timeout and also the possibility of caching responses only to requests containing specific data in the request body. + +##### Simple endpoint cache + +To enable the simple middleware you must add a new `cache` object to the `extended_paths` section of your API definition. The `cache` object is a list of endpoints for which you wish to cache all safe requests. + +In the API-level `cache_options` you must enable caching and configure the timeout whilst ensuring that the option to cache all safe requests is disabled. 
+ +The `cache_options` object has the following configuration: +- `enable_cache`: set to `true` to enable caching for this API +- `cache_all_safe_requests`: set to `false` to allow selective caching per-endpoint +- `cache_timeout`: set to the refresh period for the cache (in seconds) + +For example: +```json {linenos=true, linenostart=1} +{ + "cache_options": { + "enable_cache": true, + "cache_timeout": 60, + "cache_all_safe_requests": false + }, + + "extended_paths": { + "cache": [ + { + "/widget", + "/fish" + } + ] + } +} +``` + +In this example, the endpoint caching middleware has been configured to cache all safe requests to two endpoints (`/widget` and `/fish`) with a cache refresh period of 60 seconds. + +##### Advanced endpoint cache + + +For ultimate control over what Tyk caches, you should use the advanced configuration options for the per-endpoint cache. You can separately configure, for each HTTP method for an endpoint: +- an individual cache refresh (timeout) +- a list of HTTP response codes that should be cached +- a pattern match to cache only requests containing specific data in the [request body](/api-management/response-caching#request-selective-cache-control) + +To enable the advanced middleware you must add a new `advance_cache_config` object to the `extended_paths` section of your API definition. + +In the API-level `cache_options` you must enable caching and ensure that the option to cache all safe requests is disabled. The timeout that you set here will be used as a default for any endpoints for which you don't want to configure individual timeouts. 
+ +The `advance_cache_config` object has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint method +- `timeout`: set to the refresh period for the cache (in seconds) +- `cache_response_codes`: HTTP response codes to be cached (for example `200`) +- `cache_key_regex`: pattern match for selective caching by body value + +For example: +```json {linenos=true, linenostart=1} +{ + "cache_options": { + "enable_cache": true, + "cache_timeout": 60, + "cache_all_safe_requests": false + }, + + "extended_paths": { + "advance_cache_config": [ + { + "disabled": false, + "method": "POST", + "path": "/widget", + "cache_key_regex": "", + "cache_response_codes": [ + 200 + ], + "timeout": 10 + }, + { + "disabled": false, + "method": "GET", + "path": "/fish", + "cache_key_regex": "^shark$", + "cache_response_codes": [ + 200, 300 + ], + "timeout": 0 + } + ] + } +} +``` + +In this example the endpoint caching middleware has been configured to cache requests to two endpoints (`/widget` and `/fish`) as follows: + +| endpoint | HTTP response codes to cache | cache refresh timeout | body value regex | +| :---------- | :------------------------------ | :----------------------- | :------------------ | +| `POST /widget` | 200 | 10 seconds | none | +| `GET /fish` | 200, 300 | 60 seconds (taken from `cache_options`) | `shark` | + +#### Configuring the middleware in the API Designer + +You can use the API Designer in the Tyk Dashboard to configure the endpoint caching middleware for your Tyk Classic API by following these steps. + +##### Simple endpoint cache + +To enable and configure the simple endpoint cache, follow these instructions: + +1. 
**Configure the API level caching options** + + From the **Advanced Options** tab configure the cache as follows: + - **Enable caching** to enable the cache middleware + - **Cache timeout** to configure the timeout (in seconds) for cached requests + - **Cache only these status codes** is a list of HTTP status codes that should be cached, remember to click **Add** after entering each code to add it to the list + - **Cache all safe requests** ensure that this is **not** selected, otherwise the responses from all endpoints for the API will be cached + + Cache Options + +2. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to cache responses. Select the **Cache** plugin. + + Dropdown list showing Cache plugin + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +##### Advanced endpoint cache + +To enable and configure the advanced endpoint cache, follow these instructions: + +1. **Configure the API level caching options** + + From the **Advanced Options** tab configure the cache as follows: + - **Enable caching** to enable the cache middleware + - **Cache timeout** to configure the default timeout (in seconds) for any endpoints for which you don't want to configure individual timeouts + - **Cache only these status codes** leave this blank + - **Cache all safe requests** ensure that this is **not** selected, otherwise the responses from all endpoints for the API will be cached + + Cache Options + +2. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to cache responses. Select the **Advanced Cache** plugin. + + Selecting the Advanced Cache plugin for a Tyk Classic API + +3. **Configure the Advanced Cache plugin** + + Set the timeout and HTTP response codes for the endpoint. 
If you don't need to set a specific timeout for an endpoint you can leave this blank and Tyk will use the cache timeout configured at the API level. + + Endpoint cache configuration for Tyk Classic API + + + + + Body value match or [request selective](/api-management/response-caching#request-selective-cache-control) caching is not currently exposed in the Dashboard UI, so it must be configured through either the raw API editor or the Dashboard API. + + + +4. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +#### Configuring the middleware in the Tyk Operator + + +You can use Tyk Operator to configure the endpoint caching middleware for your Tyk Classic API by following these steps. + +##### Simple endpoint cache + +Configuring simple endpoint caching in Tyk Operator is similar to the process for a Tyk Classic API Definition. A list of endpoints for which you wish to cache safe requests should be configured within the `cache` list in the `extended_paths` section. + +In the API-level `cache_options` object, you must enable caching by setting `enable_cache` to true and configure the cache refresh period by setting a value for the `cache_timeout` in seconds. To allow selective caching per endpoint you should also set `cache_all_safe_requests`to `false`. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["26-35"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-cache +spec: + name: httpbin-cache + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-cache + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + cache: + - /get + - /anything + cache_options: + cache_all_safe_requests: false +# cache_by_headers: [] + cache_timeout: 10 + cache_response_codes: + - 400 + enable_cache: true +``` + +##### Advanced endpoint cache + +Advanced caching with Tyk Operator is a similar process to that for configuring the [advanced caching middleware in the Tyk Classic API Definition](#tyk-classic-advanced-caching). + +To enable the advanced middleware you must add a new `advance_cache_config` object to the `extended_paths` section of your API definition. + +This allows you to configure caching per endpoint. For each endpoint, it is possible to specify the endpoint path, method, list of response codes to cache, cache timeout and a cache key regular expression. 
The cache key regular expression represents a pattern match to cache only requests containing specific data in the [request body](/api-management/response-caching#request-selective-cache-control).
+ +For example, if you only want to cache requests made with the `OPTIONS` method, you can configure the upstream cache control accordingly and return cache control headers only in those responses. With this configuration, Tyk will cache only those responses, not those for other methods for the same path. + +Upstream cache control is configured on a per-API and per-endpoint basis, giving maximum flexibility. All configuration is performed within the API definition. + +### Enabling upstream cache control for an API + +To set up upstream cache control, you must configure `cache_options` in the API definition as follows: + - first enable the Tyk cache (using `enable_cache`) + - ensure that global/safe request caching is disabled (`cache_all_safe_requests` is set to `false`) + - set `enable_upstream_cache_control` to `true` + - add the endpoints to be cached to the list in `extended_paths.cache` + +For example, to enable upstream cache control for the `/ip` endpoint (path) of your API you would add the following to the API definition: + +``` +"cache_options": { + "enable_cache": true, + "cache_all_safe_requests": false, + "enable_upstream_cache_control": true, + "extended_paths": { + "cache": [ + "ip" + ] + } +} +``` + +If you are using Tyk Dashboard, you can configure these settings within the Advanced Settings section of the API Designer. You should select **Enable upstream cache control** and deselect **Global cache**, then follow the steps for per-path caching. 
+ +### Operating cache control from the upstream server + +When upstream cache control is configured, the Gateway will check the response from the upstream server for the header `x-tyk-cache-action-set`: + - if this is provided in the response header and is set to `1` or `true` then the response will be stored in the cache + - if the header is empty or absent, Tyk follows its default behavior, which typically involves not caching the request, or caching only valid response codes (`cache_response_codes`) + +The upstream server also controls the length of time that Tyk should cache the response (Time-To-Live or TTL). + +Tyk looks for the header `x-tyk-cache-action-set-ttl` in the response: + - if this is found and has a positive integer value, the Gateway will cache the response for that many seconds + - if the header is not present, Tyk falls back to the value specified in `cache_options.cache_timeout` + +By configuring these headers in the responses from your services, you can have precise control over caching behavior. + +#### Using a custom TTL header key +If you wish to use a different header value to indicate the TTL you can do so by adding the `cache_control_ttl_header` option to the API definition. + +For example, if you configure: + ``` + "cache_options": { + "cache_control_ttl_header": "x-expire" + } + ``` + +and also send `x-expire: 30` in the response header, Tyk will cache that specific response for 30 seconds. + + + +## Invalidating the Cache + +The cache for an API can be invalidated (or flushed) to force the creation of a new cache entry before the cache’s normal expiry. + +This is achieved by calling one of the dedicated cache invalidation API endpoints. There is a cache invalidation endpoint in both the Tyk Dashboard API and Tyk Gateway API; the URLs differ slightly, but they have the same effect. 
+ +For Dashboard-managed deployments, it’s recommended to call the Dashboard API version, as this will handle the delivery of the message to all Gateways in the cluster. + +Caches are cleared on per-API basis, so the request to the invalidation endpoint must include the ID of the API in the path. + +For example, with the Tyk Gateway API: + +``` +DELETE /tyk/cache/{api-id} +``` + +and with the Tyk Dashboard API: + +``` +DELETE /api/cache/{api-id} +``` + +Note that prior to Tyk version 3.0.9 and 4.0, this was not supported on MDCB Data Plane gateways. + + + +Cache invalidation is performed at the API level, so all cache entries for the API will be flushed. + + + +## Optimizing the Cache Storage + +Tyk creates the API cache in Redis, as it gives high performance and low latency. By default, the cache will use the same database that is used to store the API keys, minimizing the deployment footprint. + +For [multi-data center](/api-management/mdcb#redis) deployments, the Data Planes have a locally deployed Redis. This enables them to have a localised cache close to the traffic-serving Gateways. + +The [cache key](/api-management/response-caching#cache-key) is used as the Redis key, for quick lookups. + +For high-traffic systems that make heavy use of caching, it can make sense to use separate Redis databases for cache storage and for API keys, at the expense of increased deployment footprint. + +### Configuring a separate cache +To enable a separate cache server, you must deploy additional Redis instance(s) and apply additional configuration within your Tyk Gateway's `tyk.conf` configuration file. 
+ +You must + - set `enable_separate_cache_store` to `true` + - provide additional Redis connection information in the `cache_storage` section + +For example: +```json +{ +"enable_separate_cache_store": true, +"cache_storage": { + "type": "redis", + "host": "", + "port": 0, + "addrs": [ + "localhost:6379" + ], + "username": "", + "password": "", + "database": 0, + "optimisation_max_idle": 3000, + "optimisation_max_active": 5000, + "enable_cluster": false + } +} +``` + +The configuration of the separate Redis Cache is the same (and uses the same underlying driver) as the regular configuration, so [Redis Cluster](/tyk-configuration-reference/redis-cluster-sentinel#configure-redis-cluster) is fully supported. If you set `enable_cluster` to `false`, you only need to set one entry in `addrs`. + + + +Prior to Tyk Gateway v2.9.3, `hosts` was used instead of `addrs`; since v2.9.3 `hosts` has been deprecated. + + + diff --git a/api-management/security-best-practices.mdx b/api-management/security-best-practices.mdx new file mode 100644 index 000000000..163e93508 --- /dev/null +++ b/api-management/security-best-practices.mdx @@ -0,0 +1,418 @@ +--- +title: "Security Best Practices" +description: "Guide on API management and security best practices, including authentication, authorization, resource protection, governance, and OWASP threat mitigation with Tyk." +keywords: "OWASP, Security, Top Ten, API Management best practice, API Security, Authentication, Security, Configuration, SSL, Certificates, Authentication, Authorization, API security, API Gateway Security" +sidebarTitle: "Security Best Practices" +--- + +## Overview + +This section serves as a detailed resource for understanding key concepts and tools related to API security. It provides explanations of critical practices such as authentication, authorization, and governance, offering insights into how these concepts work and why they matter. 
Whether you're looking to mitigate threats identified by the [OWASP API Security Top 10](https://owasp.org/API-Security/editions/2023/en/0x00-header/) or to configure your APIs for better resilience, this page breaks down the essentials. + +Two of the most prevalent topics are [authentication](#authentication) and [authorization](#authorization), which occupy four of the top five positions. These are critical elements of API security, which verify the identity of API clients and control what they’re able to do. Alongside these are a number of other beneficial topics that are also within the remit of API management, all of which will be covered in this section. These include: + +- [Governance](#governing-apis-effectively) +- [Configuration](#configuration-best-practices) +- [Resource Consumption](#managing-api-resources) + +## Mitigating The Top 10 OWASP Threats + +The Open Web Application Security Project (OWASP) provides a top ten threat awareness document compiled by security experts. For more details on the OWASP project visit [https://www.owasp.org](https://www.owasp.org). Below are the top ten threats and how Tyk guards against them. For further details please visit our [blog](https://tyk.io/blog/res-owasp-api-security-intro/) + +##### 1 - Broken Object Level Authorization (BOLA) + +Broken Object Level Authorization (BOLA) can occur due to a lack of access control to API resources. This vulnerability allows attackers to manipulate or bypass authorization mechanisms, typically by tampering with resource identifiers to gain unauthorized access to specific resources or data. BOLA is a critical security concern as it can lead to data breaches and unauthorized actions within a system. + +It is the responsibility of the API to handle this form of attack since it can access and understand the data needed to make authorization decisions on individual objects within the application database. 
+ +##### 2 - Broken Authentication + +Authentication is a vital aspect of API security. Failure to do so, as noted by OWASP, leads to *Broken Authentication* posing a significant risk to both API providers and data. + +Tyk provides the following features and authentication mechanisms: +- Prioritize secure methods, like [mutual TLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls), over [basic authentication](/api-management/authentication/basic-authentication) wherever feasible. +- API owners can integrate external Identity Providers (IdPs) supporting methods like [OpenID Connect](/api-management/client-authentication#integrate-with-openid-connect-deprecated), [OAuth 2.0](/api-management/authentication/oauth-2#using-the-authorization-code-grant) or [JSON Web Tokens](/basic-config-and-security/security/authentication-authorization/json-web-tokens). +- [Single Sign-On](/api-management/external-service-integration#single-sign-on-sso) can be used for a centralized and trusted authentication source. API operators can choose from common authentication methods such as OAuth 2.0, LDAP, and SAML. +- [Dynamic Client Registration](/tyk-developer-portal/tyk-portal-classic/dynamic-client-registration#oauth-20-dynamic-client-registration-protocol-dcr), enables third-party authorization servers to issue client credentials via the Tyk Developer Portal. This streamlines Identity Management, eliminating the need to manage credentials across multiple systems. +- Tyk's default authentication setup disallows credentials in URLs, reducing the risk of inadvertent exposure through backend logs. +- Tyk Gateway can be configured to enforce a [minimum TLS version](/api-management/certificates#supported-tls-versions), enhancing security by blocking outdated and insecure TLS versions. 
+ +##### 3 - Broken Object Property Level Authorization (BOPLA) + +REST APIs provide endpoints that return all properties of an object in the reponse, some of which could contain sensitive data. Conversely, GraphQL API requests allow the clients to specify which properties of an object should be retrieved. + +From a REST API perspespective, it is the responsibility of the API to ensure that the correct data is retrieved. The Gateway can provide additional security measures as follows: +- [Body transformation plugins](/api-management/traffic-transformation/request-method) can be used to remove sensitive data from the response if the API is unable to do so itself. +- [JSON Schema validation](/api-management/traffic-transformation/request-validation#request-validation-using-classic) to validate that an incoming data payload meets a defined schema. Payloads that do not adhere to the schema are rejected. + +For GraphQL APIs, the gateway can be used to define the GraphQL schemas, limiting which properties of an object are queryable. Furthermore, access can be controlled to specific properties by configuring [field-based permissions](/api-management/graphql#field-based-permissions). Subsequently, the visiblity of a schema's properties can be controlled for different consumers of the GraphQL API. + + +##### 4 - Unrestricted Resource Consumption + +APIs can become overwhelmed if the resources upon which they rely are fully consumed. In such situations, an API can no longer operate, and will no longer be able to service requests, or potentially even be unable to complete those currently in progress. 
+ +As an APIM product, Tyk Gateway can be configured to use the following out-of-the-box functionality when handling API traffic for legitimate users: + +- [Circuit breaker](/planning-for-production/ensure-high-availability/circuit-breakers) +- [Payload size limiter](/api-management/traffic-transformation/request-size-limits) +- [Rate limiter / throttling](/api-management/rate-limit#introduction) +- [Caching](/api-management/response-caching) +- [Enforced timeout](/planning-for-production/ensure-high-availability/enforced-timeouts) +- [IP restriction](/api-management/gateway-config-tyk-classic#ip-access-control) +- [GraphQL query complexity limiting](/api-management/graphql#complexity-limiting-1) + +For Denial of Service (DoS) attacks it is recommended to use specialist 3rd party services to prevent DoS attacks from reaching your infrastructure. + +##### 5 - Broken Function Level Authorization (BFLA) + +To prevent Broken Functional Level Authorization (BFLA), requests to REST API endpoints must be authorized correctly. This involves validating client permissions against the requested resources. Requests from clients with insufficient permissions must be rejected. + +Tyk offers several measures to assist with protection from BFLA threats: + +- *Establish path-based access rights*: [Policies](/api-management/policies#what-is-a-security-policy) are predefined sets of rules which grant access to particular APIs. These can include [path-based permissions](/api-management/policies#secure-your-apis-by-method-and-path), which restrict access to particular paths and methods within an API. Clients can be assigned one or more policies which the Gateway will validate when it receives a request. +- *Access Control*: Tyk has plugins that control access to API endpoints. 
They are known as [allowlist](/api-management/traffic-transformation/allow-list#api-definition) and [blocklist](/api-management/traffic-transformation/block-list#api-designer) and can be configured via the Endpoint Designer of an API Definition. Both plugins grant and deny access to API paths and methods, but do so in different ways, which makes them mutually exclusive. When the allowlist plugin is used, only the marked paths and methods are allowed, all other paths and methods are blocked. This can be perceived as *deny by default* since it provides the least privileges. The reverse is true for the blocklist plugin, only the paths and methods marked as blocklist are blocked, all other paths and methods are allowed. It is recommended to use the *allowlist* approach, since it is the most restrictive, only allowing marked endpoint paths and paths. +- *CORS*: This [functionality](/api-management/gateway-config-tyk-classic#cross-origin-resource-sharing-cors) allows the Tyk Gateway to limit API access to particular browser-based consumers. + +##### 6 - Unrestricted Access To Sensitive Business Flows + +This involves attackers understanding an API's business model, identifying sensitive business processes and automating unauthorized access to these processes. This can disrupt business operations by preventing legitimate users from making purchases for example. Attackers manually locate target resources and work to bypass any existing mitigation measures. + +These business flows are application specific, being unique to the API's backend systems. Subsequently, the API owner is responsible for addressing the security issues posed by this threat. Furthermore, to discover points of exploitation and test IT security breaches, pentesting is recommended. + +The APIM can be used to protect sensitive endpoints using authentication and authorization. Tyk recommends considering splitting Admin APIs from client facing APIs. 
This allows authentication and authorization checks to be defined and managed by different governance models, thus establishing clear role models. + +Furthermore, the APIM can validate authentication and authorization by scope to ensure that the client has the correct credentials before the upstream API processes the request. + +##### 7 - Server Side Request Forgery (SSRF) + +Server Side Request Forgery (SSRF) is a security vulnerability in web applications where an attacker can manipulate a server to make unauthorized requests to internal or external resources, potentially leading to data leaks or remote code execution. This can allow an attacker to probe or attack other parts of the application's infrastructure, potentially compromising sensitive information and systems. + +This is application specific and is largely the responsibility of the API. However, Tyk Gateway can assist with this form of attack through [JSON schema validation](/api-management/traffic-transformation/request-validation#request-validation-using-classic) for incoming payloads. For example, a schema could contain a regular expression to reject localhost URLs. These URLs could be used by an attacker to perform port scanning for example. + +##### 8 - Security Misconfiguration + +Tyk offers several mechanisms to help protect an API from Security Misconfiguration exploits: + +- Use [response header manipulation](/api-management/traffic-transformation/response-headers) to remove or modify API sensitive information. +- Use [response body manipulation](/api-management/traffic-transformation/response-body) to remove or modify parts containing sensitive information. +- [TLS](/api-management/certificates) to ensure that clients use the right service and encrypt traffic. +- [Mutual TLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls) with both the clients and API to ensure that callers with explicitly allowed client certificates can connect to the endpoints. 
+- [Error Templates](/api-management/gateway-events#error-templates) can be used to return a response body based on status code and content type. This can help minimize the implementation details returned to the client. +- [CORS functionality](/api-management/gateway-config-tyk-classic#cross-origin-resource-sharing-cors) allows the Tyk Gateway to limit API access to particular browser-based consumers. +- [Policy Path-Based Permissions](/api-management/policies#secure-your-apis-by-method-and-path) and the [allowlist](/api-management/traffic-transformation/allow-list#api-definition) plugin can be used to prevent clients from accessing API endpoints using non-authorized HTTP methods. For example, blocking the use of the DELETE method on an endpoint which should only accept GET requests. +- [Environment variables](/tyk-oss-gateway/configuration) can help standardize configuration across containerised deployments. +- For GraphQL APIs: +- [Schema Introspection](/api-management/graphql#introspection) ensures that the Tyk Dashboard automatically uses the schema of the upstream GraphQL API and can keep it synchronised if it changes. +- [GraphQL Schema Validation](/api-management/graphql#schema-validation) prevents invalid schemas from being saved. This catches errors such as duplicate type names and usage of unknown types. +- Third-party [Secret Storage](/tyk-configuration-reference/kv-store) to centralise configuration of sensitive data such as passwords. This data can then be dynamically referenced by Tyk configuration files, rather than being hard coded. +- Users can can write their own [custom plugins](/api-management/plugins/overview#) in a variety of languages, either directly or through gRPC calls, to implement their requirements. + +The Ops team should also take reponsibility for monitoring the APIs for errors and patching accordingly. 
Regular [Penetration Tests](https://en.wikipedia.org/wiki/Penetration_test) should be scheduled to ensure the security of published services. Tyk, through our Professional Services or Partners, can assist in the process. + +##### 9 - Improper Inventory Management + +Tyk offers the following features to mitigate improper inventory management: + +- [Versioning](/api-management/api-versioning) allows newer versions of APIs to coexist with the older versions, facilitating deprecation and sunsetting. +- [Sunsetting](/api-management/api-versioning#sunsetting-api-versions) allows versions to be configured with an Expiry Time, ensuring that a version is not accessible after the expiry date. +- [Key expiry](/api-management/policies#access-key-expiry) ensures that access to an API is short lived, with a per key configurable Time to Live (TTL) for which a token remains valid before it expires. The implementation of key expiry, with a configurable Time To Live (TTL), mitigates the impact of compromised tokens by narrowing the window of vulnerability. Setting a TTL reduces the time frame during which a compromised token could be exploited, enhancing overall security. +- Tyk Developer Portal catalogs APIs and facilitates granting access to them. Integrated with a CMDB it can help keep documentation updated. +- [Tyk Analytics](/api-management/dashboard-configuration#traffic-analytics) can help identify stagnant and stale APIs. +- [Tyk Pump](/api-management/tyk-pump) can ship metrics needed for analytics into Tyk Dashboard and other systems. +- Third-party [Secret Storage](/tyk-configuration-reference/kv-store) can be used to centralise and protect sensitive configuration data such as passwords, rather than exposing them as plain text in Tyk configuration files. + +In addition, it is best practice to consider any definition of done to include corresponding documentation updates. 
+ +##### 10 - Unsafe Consumption Of APIs + +Attackers may identify and target the third party APIs/services used by an API. This can lead to leaked sensitive information, denial of service, injection attacks etc. + +It is the responsibility of the API to provide protection against these attacks. However, if the organization uses the Gateway as a forwarding proxy to third party APIs, then the following features could be used: + +- [JSON Schema validation](/api-management/traffic-transformation/request-validation#request-validation-using-classic) to validate that an incoming data payload meets a defined schema. Payloads that do not adhere to the schema are rejected. +- [Versioning](/api-management/api-versioning) allows newer versions of third party APIs to coexist with the older versions, facilitating deprecation and sunsetting. +- [TLS](/api-management/certificates) to ensure that clients use the right service and encrypt traffic. + + +## Managing Authentication and Authorization + +### Authentication + +Authentication is the process of identifying API clients. It’s a broad topic, with many approaches to choose from. Choosing the right approach is important, as it forms a fundamental part of the overall security strategy. The decision depends on many risk factors; users, functionality, data, accessibility and compliance, to name just a few. While there isn’t necessarily a single, correct choice, it’s usually safe to assume that some form of authentication is needed, as it’s a crucial prerequisite in performing subsequent identity-based authorization checks. + +**Implement Appropriate Authentication** + +Choose a suitable authentication approach based on the risk profile of the API. Is it publicly accessible or internal? Does it require user interaction or is it machine to machine? How sensitive is the data and functionality provided by the API? 
Simplistic approaches, such as [Bearer Tokens](/api-management/authentication/bearer-token), can work for low risk, basic APIs, but for higher risk or more sophisticated APIs, it may be more appropriate to use a standards-based approach such as [OAuth 2.0](/api-management/authentication/oauth-2) or [OpenID Connect](/api-management/client-authentication#integrate-with-openid-connect-deprecated). Furthermore, using an [external identity provider](/api-management/client-authentication#integrate-with-external-authorization-server-deprecated) can deliver additional benefits, such as [single sign-on](/api-management/external-service-integration#single-sign-on-sso), as well as multi-factor authentication approaches such as [biometric verification](https://www.okta.com/identity-101/biometrics-secure-authentication). + +**Handle Data Securely** + +Don’t undermine the authentication process by leaking sensitive authentication data. Use [transport layer security](/api-management/certificates) and hashing to prevent credentials from being intercepted and stolen through insecure transmission and storage. These principles also apply to upstream requests made by the gateway and upstream API to other APIs and services. + +**Enforce Good Practices** + + +Establish rules that reduce risk and enhance overall system security. Use [password policies](/api-management/user-management#password-policy) to prevent the use of weak passwords, and [TLS policies](/api-management/certificates#supported-tls-versions) to prevent the use of older TLS versions that are now deprecated and considered vulnerable. + +**Protect Sensitive Endpoints** + +Reduce susceptibility of sensitive endpoints to brute force dictionary or password stuffing attacks. The typical target for this type of attack are endpoints that use credentials, such as login and password recovery. 
Unfortunately, anonymous access is required for these endpoints, so authentication cannot be used to protect them; the best approach is therefore to hinder access by using techniques such as [rate limiting](/api-management/rate-limit#rate-limiting-layers), [captcha](https://en.wikipedia.org/wiki/CAPTCHA) and one-time URLs. + + +### Authorization +Authorization is the process of validating API client requests against the access rights they have been granted, ensuring that the requests comply with any imposed limitations. It’s the most prevalent topic on the OWASP list, with three entries covering different levels of authorization. + +Almost any part of a request can be scrutinised as part of authorization, but choosing the best approach depends on the type of API. For example, with REST APIs, the requested method and path are good candidates, but they aren’t relevant for GraphQL APIs, which should focus on the GraphQL query instead. + +Authorization can be a complex process that occurs at multiple locations throughout the request lifecycle. For example, a gateway can use access control policies to determine whether a required path is acceptable. But for decisions based on object data, such as when a client requests a particular record from the database, it’s the API that’s best positioned, as only it has access to the necessary data. For more information about the authorization process, see Authorization Levels in the appendix. + +#### Split Authorization + +Implement authorization in the best locations across the stack. For an overview of the different authorization levels across the stack please visit this [page](#managing-authorization-levels). Use the gateway to handle general API authorization related to hosts, methods, paths and properties. This leaves the API to handle the finer details of object-level authorization. In terms of OWASP's authorization categories, it can be split as follows: + +##### Object Level Authorization + +Handle with the API. 
It can access and understand the data needed to make authorization decisions on individual objects within its database. + +##### Object Property Level Authorization + +Handle with both the API and the gateway. The approach depends on the type of API: + +For REST APIs, it’s the API that’s primarily responsible for returning the correct data. To complement this, the gateway can use [body transforms](/api-management/traffic-transformation/response-body) to remove sensitive data from responses if the API is unable to do so itself. The gateway can also enforce object property-level restrictions using [JSON validation](/api-management/traffic-transformation/request-validation#request-validation-using-classic), for scenarios where the client is sending data to the API. + +For GraphQL APIs, use the gateway to define [GraphQL schemas](/api-management/graphql#managing-gql-schema) to limit which properties are queryable, then optionally use [field-based permissions](/api-management/graphql#field-based-permission) to also specify access rights to those properties. + +##### Function Level Authorization + +Handle with the gateway. Use [security policies](/api-management/policies), [path-based permissions](/api-management/policies#secure-your-apis-by-method-and-path), [allow lists](/api-management/traffic-transformation/allow-list#api-definition) and [block lists](/api-management/traffic-transformation/block-list#api-designer) to manage authorization of hosts and paths. + +#### Assign Least Privileges + +Design [security policies](/api-management/policies#what-is-a-security-policy) that contain the least privileges necessary for users to achieve the workflows supported by the API. By favoring specific, granular access over broad access, this enables user groups and use cases to be addressed directly, as opposed to broad policies that cover multiple use cases and expose functionality unnecessarily. 
+ +##### Deny by Default + +Favor use of [allow lists](/api-management/traffic-transformation/allow-list#api-definition) to explicitly allow endpoints access, rather than [block lists](/api-management/traffic-transformation/block-list#api-designer) to explicitly deny. This approach prevents new API endpoints from being accessible by default, as the presence of other, allowed endpoints means that access to them is implicitly denied. + +##### Validate and Control All User Input + +Protect APIs from erroneous or malicious data by validating all input before it’s processed by the API. Bad data, whether malicious or not, can cause many problems for APIs, from basic errors and bad user experience, to data leaks and downtime. The standard mitigation approach is to validate all user input, for which there are various solutions depending on the type of API: + +For REST APIs, use [schema validation](/api-management/graphql#schema-validation) to control acceptable input data values. + +For GraphQL APIs, use [GraphQL schema](/api-management/graphql#managing-gql-schema) definitions to limit what data can be queried and mutated. Additionally, [complexity limiting](/api-management/graphql#complexity-limiting-1) can be used to block resource-intensive queries. + +#### Track Anomalies + +Use [log aggregation](/api-management/logs-metrics#exporting-logs-to-third-party-tools) and [event triggers](/api-management/gateway-events#event-categories) to push data generated by application logs and events into centralised monitoring and reporting systems. This real-time data stream can be used to highlight application issues and security-related events, such as authentication and authorization failures. + +##### Understand System State + +Perform application performance monitoring by capturing gateway [instrumentation data](/api-management/logs-metrics#statsd-instrumentation). 
This enables the current system state, such as requests per second and response time, to be monitored and alerted upon. + +##### Manage Cross-Origin Resource Sharing + +Use [CORS filtering](/api-management/gateway-config-tyk-classic#cross-origin-resource-sharing-cors) to control the resources accessible by browser-based clients. This is a necessity for APIs that expect to be consumed by external websites. + + +### Managing Authorization Levels + +This section provides basic examples of where different authorization levels occur in the API management stack. The accompanying diagrams use color-coding to show links between request element and the associated authorization locations and methods. + +This is how OWASP describe the attack vectors for the three authorization levels: + +**Object Level Authorization**: β€œAttackers can exploit API endpoints that are vulnerable to broken object-level authorization by manipulating the ID of an object that is sent within the request. Object IDs can be anything from sequential integers, UUIDs, or generic strings. Regardless of the data type, they are easy to identify in the request target (path or query string parameters), request headers, or even as part of the request payload.” (source: [OWASP Github](https://github.com/OWASP/API-Security/blob/9c9a808215fcbebda9f657c12f3e572371697eb2/editions/2023/en/0xa1-broken-object-level-authorization.md)) + +**Object Property Level Authorization**: β€œAPIs tend to expose endpoints that return all object’s properties. This is particularly valid for REST APIs. For other protocols such as GraphQL, it may require crafted requests to specify which properties should be returned. 
Identifying these additional properties that can be manipulated requires more effort, but there are a few automated tools available to assist in this task.” (source: [OWASP Github](https://github.com/OWASP/API-Security/blob/9c9a808215fcbebda9f657c12f3e572371697eb2/editions/2023/en/0xa3-broken-object-property-level-authorization.md)) + +**Function Level Authorization**: “Exploitation requires the attacker to send legitimate API calls to an API endpoint that they should not have access to as anonymous users or regular, non-privileged users. Exposed endpoints will be easily exploited.” (source: [OWASP Github](https://github.com/OWASP/API-Security/blob/9c9a808215fcbebda9f657c12f3e572371697eb2/editions/2023/en/0xa5-broken-function-level-authorization.md)) + + +#### REST API - Reading Data + +Rest API - Read Data + +The client sends a `GET` request using the path `/profile/1`. This path has two parts: + +1. `/profile/`: The resource type, which is static for all requests related to profile objects. This requires function level authorization. + +2. `1`: The resource reference, which is dynamic and depends on which profile is being requested. This requires object level authorization. + +Next, the gateway handles function level authorization by checking that the static part of the path, in this case `/profile/`, is authorized for access. It does this by cross referencing the security policies connected to the API key provided in the `authorization` header. + +The gateway ignores the dynamic part of the path, in this case `1`, as it doesn't have access to the necessary object-level data to make an authorization decision for this. + +Lastly, the API handles object level authorization by using custom logic. This typically involves using the value of the `authorization` header in combination with the ownership and authorization model specific to the API to determine if the client is authorized to read the requested record. 
 + +#### REST API - Writing Data + +Rest API - Write Data + +The client sends a `POST` request using the path `/profile` and body data containing the object to write. The path `/profile` is static and requires function level authorization. The body data contains a JSON object that has two fields: + +1. `name`: A standard object field. This requires object property authorization. + +2. `id`: An object identifier field that refers to the identity of an object, so needs to be treated differently. As such, it requires both object property authorization, like name, and also object authorization. + +Next, the gateway handles function level authorization, by checking that the path, in this case `/profile`, is authorized for access. It does this by cross referencing the security policies connected to the API key provided in the `authorization` header. + +The gateway can also perform object property level authorization, by validating that the values of the body data fields, `name` and `id`, conform to a schema. + +Lastly, the API handles object level authorization by using custom logic. This typically involves using the value of the `authorization` header in combination with the ownership and authorization model specific to the API to determine if the client is authorized to write the requested data. + +#### GraphQL API - Querying Data + +GraphQL API - Querying Data + +The client sends a `POST` request using the path `/graphql` and body data containing a GraphQL query. The path `/graphql` is static and requires function level authorization. The GraphQL query contains several elements: + +- `profile`: An object type, referring to the type of object being requested. This requires object property authorization. +- `id`: An object identifier field that refers to the identity of an object, so needs to be treated differently. As such, it requires both object property authorization, like name, and also object authorization. 
+- `name`: A standard object field, referring to a property of the profile object type. This requires object property authorization. + +Next, the Gateway handles function level authorization, by checking that the path, in this case `/graphql`, is authorized for access. It does this by cross referencing the security policies connected to the API key provided in the `authorization` header. Due to the nature of GraphQL using just a single endpoint, there is no need for additional path-based authorization features; only a basic security policy is required. + +Another difference between this and the REST examples is in the way that the body data is authorized: + +- All object types and fields contained in the query are checked against the API’s GraphQL schema, to ensure they are valid. In this case, the object type is `profile`, and the fields are `id` and `name`. The schema defined in the gateway configuration can differ from that in the upstream API, which enables fields to be restricted by default. +- Field-based permissions can also be used, to authorize client access of individual fields available in the schema. In this case, `id` and `name`. + +Lastly, the API handles object level authorization by using custom logic. This typically involves using the value of the `authorization` header in combination with the ownership and authorization model specific to the API to determine if the client is authorized to access the requested data. This can be more complicated for GraphQL APIs, as the data presented by the schema may actually come from several different data sources. + +## Managing API Resources + +Excessive resource consumption poses a risk to APIs. As the number of concurrent requests handled by a server increases, so too does its consumption of CPU, RAM and storage resources. Should any of these become depleted, then the quality of service offered by applications running on the server will rapidly decline, and may even lead to their complete failure. 
+ +This issue can be caused by both legitimate consumers and malicious attackers, but they are different situations that require different solutions. For legitimate consumers, solutions should be focused on controlling API utilization through the gateway, to keep usage within agreed or desired limits. But malicious attackers require a different approach, as denial of service attacks must be blocked as far as possible from the core API infrastructure. + +**Restrict Request Flows**: Use [rate limits](/api-management/rate-limit#rate-limiting-layers) and [quotas](/api-management/request-quotas) to prevent excessive API usage. Rate limits are best used for short term control, in the range of seconds. Whereas quotas are more suited to longer terms, in the range of days, weeks or beyond. [Throttling](/api-management/request-throttling) can also be used as a type of enhanced rate limiter that queues and retries requests on the clients behalf, rather than immediately rejecting them. + +**Block Excessively Large Requests**: Place reasonable [limitations on payload sizes](/api-management/traffic-transformation/request-size-limits) to prevent oversized requests from reaching upstream servers, thereby avoiding the unnecessary consumption of resources. + +**Avoid Unnecessary Resource Usage**: Appropriate use of [caching](/api-management/response-caching) can reduce server resource consumption by simply returning cached responses instead of generating new ones. The extent to which caching can be used depends on the purpose of the endpoint, as it’s generally unsuitable for requests that modify data or responses that frequently change. 
Caching can be applied to [particular requests](/api-management/response-caching#endpoint-caching) or enabled for an [entire API](/api-management/response-caching#basic-caching), and can also be [controlled by the upstream API](/api-management/response-caching#upstream-cache-control-1) or [invalidated programmatically](/api-management/troubleshooting-debugging#how-to-clear--invalidate-api-cache). + +**Limit Complex Long-Running Tasks**: Use [GraphQL complexity limiting](/api-management/graphql#complexity-limiting-1) to prevent convoluted queries from being processed. Alternatively, [timeouts](/planning-for-production/ensure-high-availability/enforced-timeouts) can be used to terminate long-running requests that exceed a given time limit. + +**Protect Failing Services**: Defend struggling endpoints by using a [circuit breaker](/planning-for-production/ensure-high-availability/circuit-breakers). This feature protects endpoints by detecting error responses, then blocking requests for a short duration to allow them to recover. The same principle can be applied in a wider sense by using [uptime tests](/api-management/gateway-config-tyk-classic#uptime-tests), though this works on a host level instead, by removing failed hosts from the gateway load balancer. + +**Enforce Network-Level Security**: Problematic clients can be prevented from accessing the API by [blocking their address](/api-management/gateway-config-tyk-classic#ip-access-control). Conversely, for APIs with a known set of clients, [allow lists](/api-management/gateway-config-tyk-classic#ip-access-control) can be used to create a list of allowed addresses, thereby implicitly blocking every other address from the API. + +**Mitigate DoS Attacks**: Increase the chance of maintaining API availability during a denial of service attack by using [specialist mitigation services](https://www.cloudflare.com). 
These have the infrastructure capacity needed to handle [large scale distributed attacks](https://www.cloudflare.com/en-gb/learning/ddos/what-is-a-ddos-attack), with the purpose of preventing attacks from reaching the API infrastructure, thereby enabling the API to continue operating normally. + + +## Configuration Best Practices + +Modern APIs are often backed by large technology stacks composed of numerous components and libraries. Each of these is a potential weak link in the security chain, so efforts must be made to ensure that security measures are implemented throughout. The API gateway plays a critical part in an overall security strategy, by utilizing its ability to process requests in a secure manner. + +**Secure Connections** + + +Use [transport layer security](/api-management/certificates) where possible. Most importantly, on inbound connections to the gateway and outbound connection from the gateway to the upstream API and other services. TLS can also be used as a form of authentication, using [Mutual TLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls). + +**Limit Functionality** + + +Use [security policies](/api-management/policies#what-is-a-security-policy) to specify which paths, methods and schemas are accessible, whilst blocking all others. + +**Mitigate Server-Side Request Forgery** + + +Restrict any URL-based input data to specific schemas, hosts and paths by using [schema validation](/api-management/graphql#schema-validation). When data is fetched server-side, it should be validated and not returned to the client in raw format. + +**Protect Secrets** + + +Prevent sensitive data, such as usernames, passwords, license keys and other secrets, from being stored as plain text in application configuration files. Use [key value secret storage](/tyk-configuration-reference/kv-store) to dynamically load sensitive data from a secure secret manager. 
+ +**Sanitise Responses** + + +Modify or remove sensitive data from responses by using [transforms](/api-management/traffic-transformation) to alter the [response headers](/api-management/traffic-transformation/response-headers) and [body](/api-management/traffic-transformation/response-body). + + + +**Sign payloads between the Dashboard and Gateway** + + +Using payload signatures for the communication between Tyk Gateway and Tyk Dashboard is strongly recommended as an additional security measure, particularly in production environments. + +Enable payload signatures in the Gateway configuration (`tyk.conf` or environment variable) by setting `allow_insecure_configs` to `false` and then provide the public key (certificate) to the Gateway in the `public_key_path`. + +You'll need to provide the private key to the Dashboard using the `private_key_path` option in the appropriate configuration (`tyk_analytics.conf` or environment variable). This will allow your Dashboard to sign all of its payloads using the private key. + +You can easily create a public / private keypair with: + +```{.copyWrapper} +# private key +openssl genrsa -out privkey.pem 2048 + +# public key +openssl rsa -in privkey.pem -pubout -out pubkey.pem +``` + +Make sure to keep your private key safe! + +
+ +## Governing APIs Effectively + +APIs need to be managed and governed just like any other resource, otherwise organizations risk losing track of their API estate and becoming unaware of potentially vulnerable APIs running within their infrastructure. This risk is magnified as the number of teams, environments and APIs increases. Use API management as part of overarching business processes to control how APIs are accessed, managed and deployed. + +**Restrict Version Availability**: Enforce the expiry of [API versions](/api-management/api-versioning) that are planned for deprecation, by setting a sunset date, beyond which they will not be accessible. + +**Enforce Key Expiry**: In many situations it’s best to issue API keys that have a short, finite lifetime, especially when serving anonymous, external consumers. Set [expiry dates](/api-management/policies#access-key-expiry) for API keys, or use ephemeral credentials with complementary authentication techniques that support key renewal, such as [OAuth 2.0 refresh tokens](/api-management/authentication/oauth-2#using-refresh-tokens) and [dynamic client registration](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration). Then, should an API key fall into the wrong hands, there’s a chance that it has already expired. + +**Use Standardized Specifications**: Use the [OpenAPI Specification](https://en.wikipedia.org/wiki/OpenAPI_Specification) standard to design APIs. These specification documents act as a source of truth that can generate [API configuration](/api-management/gateway-config-tyk-oas) and [portal documentation](/tyk-apis/tyk-portal-api/portal-documentation#create-documentation). + +**Understand API Usage**: Use [API analytics](/api-management/dashboard-configuration#traffic-analytics) to report on usage. This captured data generates useful, actionable insights across a variety of metrics, such as API popularity, performance and trends. 
+ +**Control API Distribution**: Use [sharding](/api-management/multiple-environments#what-is-api-sharding-) to control availability of APIs across multi-gateway, multi-environment deployments. This ensures that specific APIs are only available through specific gateways, which helps to prevent undesirable situations, such as internal APIs being published to externally accessible gateways, or test API configurations reaching the production environment. +
 + +## Securing APIs with Tyk + +Securing your APIs is one of the primary uses of the Tyk API management solution. Out of the box, the Gateway offers a lot of functionality for securing your APIs and the Gateway itself. + +This section outlines all of the security configurations and components that are available to you when securing your Tyk stack. + +This section outlines some of the key security concepts that Tyk uses and that you should be familiar with before setting up and using a Tyk stack to secure your API. + +**Key Hashing** + + +See [Key Hashing](/api-management/policies#access-key-hashing) for details on how Tyk obfuscates keys in Redis. + +**TLS and SSL** + + +Tyk supports TLS connections and Mutual TLS. All TLS connections also support HTTP/2. Tyk also supports Let's Encrypt. See [TLS and SSL](/api-management/certificates) for more details. + +**Trusted Certificates** + + +As part of using Mutual TLS, you can create a list of [trusted certificates](/basic-config-and-security/security/mutual-tls/client-mtls#how-does-mutual-tls-work). + +**Certificate Pinning** + + +Introduced in Tyk Gateway 2.6.0, [certificate pinning](/api-management/upstream-authentication/mtls#certificate-pinning) is a feature which allows you to allow only specified public keys used to generate certificates, so you will be protected in case an upstream certificate is compromised. + +**API Security** + +Tyk supports various ways to secure your APIs, including: + +* Bearer Tokens +* HMAC +* JSON Web Tokens (JWT) +* Multi Chained Authentication +* OAuth 2.0 +* OpenID Connect + +See [Authentication and Authorization](/api-management/client-authentication) for more details. + +**Security Policies** + + +A Tyk security policy incorporates several security options that can be applied to an API key. These include [Partitioned Policies](/api-management/policies#partitioned-policies) and securing by [Method and Path](/api-management/policies#secure-your-apis-by-method-and-path). 
+ +See [Security Policies](/api-management/policies) for more details. diff --git a/api-management/security-features.mdx b/api-management/security-features.mdx new file mode 100644 index 000000000..48ce4ec8a --- /dev/null +++ b/api-management/security-features.mdx @@ -0,0 +1,86 @@ +--- +title: "Security Features" +description: "Guide on API management and security best practices, including authentication, authorization, resource protection, governance, and OWASP threat mitigation with Tyk." +keywords: "Security, security features, CORS, API Security, Cross-Origin Resource Sharing, Security, Configuration" +sidebarTitle: "Security Features" +--- + +## Cross-Origin Resource Sharing (CORS) + +CORS (Cross-Origin Resource Sharing) is a security feature that controls how web pages from one domain (origin) can make requests to resources hosted on a different domain. With Tyk Gateway, it is possible to enable and configure CORS per-API so that users can make browser-based requests. + +The `CORS` section is added to an API definition as listed in the examples below for Tyk Gateway and Tyk Operator. 
+ +### Examples + + + +```json +"CORS": { + "enable": true, + "allowed_origins": [ + "http://foo.com" + ], + "allowed_methods": [], + "allowed_headers": [], + "exposed_headers": [], + "allow_credentials": false, + "max_age": 24, + "options_passthrough": false, + "debug": false +} +``` + + +```yaml {linenos=true, linenostart=1, hl_lines=["14-24"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-cors-sample +spec: + name: httpbin-cors-sample + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /cors + strip_listen_path: true + CORS: + enable: true + allowed_origins: + - "http://foo.com" + allowed_methods: null + allowed_headers: null + exposed_headers: null + allow_credentials: false + max_age: 24 + options_passthrough: false + debug: false +``` + + + +--- + +### Configuration + +The CORS middleware has the following options: + +* `CORS.allowed_origins`: A list of origin domains to allow access from. Wildcards are also supported, e.g. `http://*.foo.com`. Default value is `["*"]` + +* `CORS.allowed_methods`: A list of methods to allow access via. Default value is `["GET", "POST", "HEAD"]` + +* `CORS.allowed_headers`: A list of headers that are allowed within a request. Default value is `["Origin", "Accept", "Content-Type", "X-Requested-With"]` + +* `CORS.exposed_headers`: A list of headers that are exposed back in the response. + +* `CORS.allow_credentials`: Whether credentials (cookies) should be allowed. + +* `CORS.max_age`: Maximum age of credentials. + +* `CORS.options_passthrough`: allow CORS OPTIONS preflight request to be proxied directly to upstream, without authentication and rest of checks. This means that pre-flight requests generated by web-clients such as SwaggerUI or +the Tyk Portal documentation system will be able to test the API using trial keys. If your service handles CORS natively, then enable this option. 
+ +* `debug`: If set to `true`, this option produces log files for the CORS middleware. + diff --git a/api-management/single-sign-on-ldap.mdx b/api-management/single-sign-on-ldap.mdx new file mode 100644 index 000000000..bce2df91d --- /dev/null +++ b/api-management/single-sign-on-ldap.mdx @@ -0,0 +1,517 @@ +--- +title: "Single Sign On (SSO) with LDAP" +description: "Learn how to integrate external services with Tyk API Gateway. Discover how to use middleware plugins, webhooks, and service discovery to extend your API functionality and connect with third-party systems." +keywords: "Tyk Identity Broker, TIB, Identity Provider, Identity Handler, SSO, Custom Authentication, Custom Proxy Provider, SAML, OIDC, OpenID Connect, Profiles, IDPs, Social Provider, LDAP" +sidebarTitle: "SSO with LDAP" +--- + +## Dashboard SSO with LDAP + +The Tyk Dashboard is the command and control center of your Tyk installation. It allows users to manage APIs, policies, keys, etc. All of this data is stored in the Dashboard's MongoDB database, including the user accounts. + +This works well in a lot of situations as it allows Tyk to be self-contained, but if you already have a centralised system for managing users then you may prefer to use that instead of a separate Tyk-specific database. + +Tyk Dashboard uses the [Tyk Identity Broker (TIB)](/api-management/external-service-integration#what-is-tyk-identity-broker-tib) to integrate Tyk authentication with 3rd party identity providers (IDPs). You can use this to enable your Dashboard to authenticate users with your LDAP-powered identity providers such as Active Directory. + 
+ + + +To activate SSO on the Dashboard or Developer portal, there’s no requirement to install TIB separately; it is integrated into the Dashboard and Developer Portal. You have two configurations for SSO within the dashboard: +1. **Using Embedded TIB**: No need to install it separately. +2. **Using External TIB**: If you are using a previous version of the Dashboard or Portal, you can still use SSO with TIB installed as a separate application. + + + +### Dashboard SSO with Embedded TIB + +Configuring SSO with Embedded TIB is a two-step process: + +1. **[Creating a Profile in Dashboard](#create-profile)** +2. **[Testing the SSO Flow](#testing-the-sso-flow)** + +#### Create Profile + +Create LDAP Profile + +1. Log in to your Tyk Dashboard. +1. Navigate to **User management > User Settings** in the Tyk Dashboard sidebar. +2. Click the **Create Profile** button. +3. Under the **1. Profile action** section: + * In the **Name** field, enter a descriptive name for your profile (e.g., `login-with-ldap`). + * For **Users in this profile can:**, ensure `Login to Tyk Dashboard` is selected. + * In the **Redirect URL on Success** field, enter the URL where users will be redirected after a successful login (e.g., `http://localhost:3000/tap`). + * In the **Redirect URL on failure** field, enter the URL where users will be redirected after a failed login (e.g., `http://localhost:3000/?fail=true`). + * Click the **Next** button. +4. Under the **2. Provider type** section: + * Select `LDAP`. + * Click the **Next** button. +5. Under the **3. Profile Configuration** section (this will become active/focused after the previous step): + * In the **Server** field, enter the hostname or IP address of your LDAP server (e.g., `ldap.forumsys.com`). + * In the **Port** field, enter the port number for your LDAP server (e.g., `389`). 
+ * In the **User DN** field, the distinguished name which TIB will use to identify the user - this should be updated to match your LDAP installation and must retain the `*USERNAME*` token as this is replaced by the actual username at runtime (e.g., `cn=*USERNAME*,dc=example,dc=com`). + * (Optional) Click **+ Advanced Settings (optional)** to configure further LDAP settings if needed. +6. Click the **Create Profile** button. +7. Open the created profile and copy the login URL displayed. Save it, as it will be used later in testing. (e.g., `http://localhost:3000/auth/login-with-ldap/ADProvider`) + +#### Testing the SSO Flow + + + +1. **Create a login page** + + TIB works by having credentials sent to it, so a login page must be made in order to fulfill this requirement. For this example we will create a basic login form hosted by Nginx. We can't just place the login page in our Dashboard directory as the Dashboard is not a standard web server, it only serves the pages which it has been compiled to serve. Any non-compiled page will produce a 404 response. + + Install Nginx and start it: + + ```{.copyWrapper} + sudo apt-get install nginx + sudo service nginx start + ``` + + Nginx will now serve pages out of the default web root directory `/usr/share/nginx/www`. We now need to create a web page there. This command will pipe the echoed text into a file called `login.html` which is stored in the web root: + + ```{.copyWrapper} + echo \ + " \ + \ + Tyk Dashboard LDAP login \ + \ + \ +
\ + username:
\ + password:
\ + \ +
\ + \ + " \ + | sudo tee /usr/share/nginx/www/login.html > /dev/null + ``` + + The login form contains two inputs named `username` and `password`. TIB looks for these exact parameter names when processing the request, so if you are creating your own login page you must use these input names. + + Please make sure you are using `POST` method in the form, to avoid browser caching. + + The form action `http://localhost:3000/auth/login-with-ldap/ADProvider` is the dashboard (embedded TIB) endpoint which will start the authentication process. + + + +2. **Update the Dashboard config** + + Update the Dashboard config so that any unauthenticated requests are redirected to your custom login page. We do this by updating the `sso_custom_login_url` property of the Dashboard's `tyk_analytics.conf` file, which by default is located in the `/opt/tyk-dashboard` directory. For example (ommitting all other lines in the config file and trailing comma): + + ```{.copyWrapper} + "sso_custom_login_url": "http://localhost/login.html" + ``` + + Since the Dashboard runs on port 3000 by default, this URL will use the default HTTP port of 80 which will be handled by Nginx. + +3. **Test that it works** + + 1. Open a web browser (if you're already logged in to the Dashboard, logout now) and attempt to access the Dashboard - `http://localhost:3000` + 2. This should be redirected to the custom login page - `http://localhost/login.html` + 3. Enter `read-only-admin` as the username + 4. Enter `password` as the password + 5. Submit the form + 6. You should now be logged in to the Dashboard + +### Dashboard SSO with External TIB + +This guide assumes you already have a Tyk environment set up, with a Gateway and Dashboard. If you don't, please follow the [Tyk Self-Managed getting started guide](/tyk-self-managed/install). + +The environment used for this guide is, for simplicity's sake, all contained on a single host running Ubuntu 14.04. 
The hostname `my-tyk-instance.com` has been set to point at `127.0.0.1`. For production environments it is recommended that each component is hosted separately and appropriate security measures are used such as HTTPS to secure connections. + +All commands shown are run from inside the Tyk host environment. + +#### Setup TIB + +1. **Download TIB** + + You can download TIB from the [releases page of the TIB repository on GitHub](https://github.com/TykTechnologies/tyk-identity-broker/releases). The release names contain the architecture and version i.e. `tib-linux--.tar.gz`. This example uses `amd64` and `0.2.1` for all the commands, but you should update them to use the latest version and relevant architecture for your platform. + + First step is to download TIB onto the environment: + + ```{.copyWrapper} + wget https://github.com/TykTechnologies/tyk-identity-broker/releases/download/v0.2.1/tib-linux-amd64-0.2.1.tar.gz + ``` + +2. **Extract and store TIB** + + As the other Tyk components are installed in your `/opt` directory, we recommend you install TIB there too: + + ```{.copyWrapper} + tar -xvzf tib-linux-amd64-0.2.1.tar.gz + ``` + + TIB will now be extracted to the directory `tib-0.2.1`, let's move this to `/opt` and change to that directory: + + ```{.copyWrapper} + sudo mv tib-0.2.1 /opt + cd /opt/tib-0.2.1 + ``` + +3. **Configure TIB** + + There are two configuration files for TIB: + + 1. `tib.conf` for the main application configuration settings + 2. 
`profiles.json` to configure the profiles which TIB will attempt to authenticate against + + Out of the box you don't need to change much, but there are several attributes you should check to make sure they are correct for your environment: + + * `Secret`: The REST API secret used when configuring TIB remotely + * `TykAPISettings.GatewayConfig.Endpoint`: The URL through which TIB can communicate with your Tyk Gateway + * `TykAPISettings.GatewayConfig.Port`: The port through which TIB can communicate with your Tyk Gateway + * `TykAPISettings.GatewayConfig.AdminSecret`: The secret required for TIB to communicate with your Tyk Gateway REST API - must match the `secret` property in your Gateway's `tyk.conf` + * `TykAPISettings.DashboardConfig.Endpoint`: The URL through which TIB can communicate with your Tyk Dashboard + * `TykAPISettings.DashboardConfig.Port`: The port through which TIB can communicate with your Tyk Dashboard + * `TykAPISettings.DashboardConfig.AdminSecret`: The secret required for TIB to communicate with your Tyk Dashboard Admin REST API - must match the `admin_secret` property in your Dashboard's `tyk_analytics.conf` + + The `tib.conf` for this example is as follows (yours might require different values): + + ```{.copyWrapper} + { + "Secret": "352d20ee67be67f6340b4c0605b044b7", + "HttpServerOptions": { + "UseSSL": false, + "CertFile": "./certs/server.pem", + "KeyFile": "./certs/server.key" + }, + "BackEnd": { + "Name": "in_memory", + "ProfileBackendSettings": {}, + "IdentityBackendSettings": { + "Hosts" : { + "localhost": "6379" + }, + "Password": "", + "Database": 0, + "EnableCluster": false, + "MaxIdle": 1000, + "MaxActive": 2000 + } + }, + "TykAPISettings": { + "GatewayConfig": { + "Endpoint": "http://localhost", + "Port": "8080", + "AdminSecret": "352d20ee67be67f6340b4c0605b044b7" + }, + "DashboardConfig": { + "Endpoint": "http://localhost", + "Port": "3000", + "AdminSecret": "12345" + } + } + } + ``` + +#### Create Profile + +1. 
**Set up the LDAP profile** + + TIB ships with a default `profiles.json` file which contains many example configuration for different scenarios. This guide is focused on LDAP authentication for the Dashboard, so we will update `profiles.json` to contain a single profile for this purpose. + + The key attributes for LDAP profile are: + + * `ID`: The ID by which we will activate the profile by calling the appropriate TIB endpoint + * `OrgId`: The organization id which the profile is connected to - make sure this is the correct id for your organization (see the [Dashboard Admin API documentation](/api-management/dashboard-configuration#organizations-api) for details on how to retrieve this) + * `IdentityHandlerConfig.DashboardCredential`: The Dashboard API Access credential which is used as authorization header + * `ProviderConfig.FailureRedirect`: The URL which TIB will redirect to if the authentication fails + * `ProviderConfig.LDAPPort`: The port through which TIB can communicate with your LDAP server + * `ProviderConfig.LDAPServer`: The URL through which TIB can communicate with your LDAP server + * `ProviderConfig.LDAPUserDN`: The distinguished name which TIB will use to identify the user - this should be updated to match your LDAP installation and must retain the `*USERNAME*` token as this is replaced by the actual username at runtime + * `ReturnURL`: The URL which TIB will redirect to if the authentication succeeds - this should be the `/tap` endpoint of your Tyk Dashboard + + The `profiles.json` for this example is as follows (again, update values for your environment): + + ```{.copyWrapper} + [ + { + "ActionType": "GenerateOrLoginUserProfile", + "ID": "1", + "OrgID": "59bfdf5b56c02c065d24638e", + "IdentityHandlerConfig": { + "DashboardCredential": "bb5735026be4400e67ed9801c2f1e2f9" + }, + "ProviderConfig": { + "FailureRedirect": "http://my-tyk-instance.com:3000/?fail=true", + "LDAPAttributes": [], + "LDAPPort": "389", + "LDAPServer": "ldap.forumsys.com", + 
"LDAPUserDN": "cn=*USERNAME*,dc=example,dc=com" + }, + "ProviderName": "ADProvider", + "ReturnURL": "http://my-tyk-instance.com:3000/tap", + "Type": "passthrough" + } + ] + ``` + + Notice that this is a JSON array object with a single element; an LDAP profile. The LDAP server referenced by this profile is the freely-available service provided forumsys.com. See [their documentation](https://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/) for more information. You can use any OpenLDAP compatible server. + +2. **Start TIB** + + Start TIB by executing the TIB binary. This will produce an output log into the console which you can use to watch TIB process requests. Since TIB looks for the config file in the local directory, you should execute the application from there too. + + ```{.copyWrapper} + cd /opt/tib-0.2.1 + ./tib + ``` + + If all is well you should see TIB output a few messages when it starts: + + ``` + toth/tothic: no SESSION_SECRET environment variable is set. The default cookie store is not available and any calls will fail. Ignore this warning if you are using a different store. + INFO[0000] Tyk Identity Broker v0.2 + INFO[0000] Copyright Martin Buhr 2016 + + DEBU[0000] [MAIN] Settings Struct: {{http://localhost 8080 352d20ee67be67f6340b4c0605b044b7} {http://localhost 3000 12345}} + INFO[0000] [MAIN] Initialising Profile Configuration Store + INFO[0000] [IN-MEMORY STORE] Initialised + INFO[0000] [MAIN] Initialising Identity Cache + INFO[0000] [REDIS STORE] Initialised + INFO[0000] [FILE LOADER] Loaded: 1 profiles from profiles.json + INFO[0000] [MAIN] Broker Listening on :3010 + ``` + + Start a new shell session to carry on with the remaining process. + + +#### Test the SSO Flow + + + +1. **Create a login page** + + TIB works by having credentials sent to it, so a login page must be made in order to fulfill this requirement. For this example we will create a basic login form hosted by Nginx. 
We can't just place the login page in our Dashboard directory as the Dashboard is not a standard web server, it only serves the pages which it has been compiled to serve. Any non-compiled page will produce a 404 response. + + Install Nginx and start it: + + ```{.copyWrapper} + sudo apt-get install nginx + sudo service nginx start + ``` + + Nginx will now serve pages out of the default web root directory `/usr/share/nginx/www`. We now need to create a web page there. This command will pipe the echoed text into a file called `login.html` which is stored in the web root: + + ```{.copyWrapper} + echo \ + " \ + \ + Tyk Dashboard LDAP login \ + \ + \ +
\ + username:
\ + password:
\ + \ +
\ + \ + " \ + | sudo tee /usr/share/nginx/www/login.html > /dev/null + ``` + + The login form contains two inputs named `username` and `password`. TIB looks for these exact parameter names when processing the request, so if you are creating your own login page you must use these input names. + + Please make sure you are using `POST` method in the form, to avoid browser caching. + + The form action `http://my-tyk-instance.com:3010/auth/1/ldap` is the TIB endpoint which will start the authentication process. The URL can be broken down as follows: + + * `http://my-tyk-instance.com`: The method and hostname used to connect to TIB - you should use HTTPS to prevent confidential data from being exposed + * `3010`: The default port for TIB + * `auth`: The special TIB endpoint which accepts authentication requests + * `1`: The number of the profile which we are using - matches against the `ID` property of the profile in `profiles.json` + * `ldap`: We need to add a string to the end of the request, so we have used `ldap` here + +2. **Update the Dashboard config** + + Update the Dashboard config so that any unauthenticated requests are redirected to your custom login page. We do this by updating the `sso_custom_login_url` property of the Dashboard's `tyk_analytics.conf` file, which by default is located in the `/opt/tyk-dashboard` directory. For example (ommitting all other lines in the config file and trailing comma): + + ```{.copyWrapper} + "sso_custom_login_url": "http://my-tyk-instance.com/login.html" + ``` + + Since the Dashboard runs on port 3000 by default, this URL will use the default HTTP port of 80 which will be handled by Nginx. + +3. **Test that it works** + + Now that we have TIB installed and configured, Nginx installed and hosting our custom login page, and the Dashboard configured to redirect to that login page we can now test the solution. 
Remember that this example is using the LDAP provided at forumsys.com, so if you are using your own LDAP then substitute the username and password with appropriate values from your system. + + 1. Open a web browser (if you're already logged in to the Dashboard, logout now) and attempt to access the Dashboard - `http://my-tyk-instance.com:3000` + 2. This should be redirected to the custom login page - `http://my-tyk-instance.com/login.html` + 3. Enter `read-only-admin` as the username + 4. Enter `password` as the password + 5. Submit the form + 6. You should now be logged in to the Dashboard + + +## Advance LDAP Configuration + +The LDAP Identity Provider gives you functionality to bind a user to an LDAP server based on a username and password configuration. The LDAP provider currently does not extract user data from the server to populate a user object, but will provide enough defaults to work with all handlers. + +### Log into the Dashboard using LDAP + +Below is a sample TIB profile that can be used to log a user into the Dashboard using an LDAP pass-through provider: + +```{.copyWrapper} +{ + "ActionType": "GenerateOrLoginUserProfile", + "ID": "4", + "OrgID": "{YOUR-ORG-ID}", + "IdentityHandlerConfig": { + "DashboardCredential": "ADVANCED-API-USER-API-TOKEN" + }, + "ProviderConfig": { + "FailureRedirect": "http://{DASH-DOMAIN}:{DASH-PORT}/?fail=true", + "LDAPAttributes": [], + "LDAPPort": "389", + "LDAPServer": "localhost", + "LDAPUserDN": "cn=*USERNAME*,cn=dashboard,ou=Group,dc=test-ldap,dc=tyk,dc=io" + }, + "ProviderName": "ADProvider", + "ReturnURL": "http://{DASH-DOMAIN}:{DASH-PORT}/tap", + "Type": "passthrough" +} + +``` + +The only step necessary to perform this is to send a POST request to the LDAP URL. + +TIB can pull a username and password out of a request in two ways: + +1. Two form fields called "username" and "password" +2. A basic auth header using the Basic Authentication standard form + +By default, TIB will look for the two form fields. 
To enable Basic Auth header extraction, add `"GetAuthFromBAHeader": true` to the `ProviderConfig` section. + +The request should be a `POST`. + +If you make this request with a valid user that can bind to the LDAP server, Tyk will redirect the user to the dashboard with a valid session. There's no more to it, this mechanism is pass-through and is transparent to the user, with TIB acting as a direct client to the LDAP provider. + + + +The `LDAPUserDN` field MUST contain the special `*USERNAME*` marker in order to construct the user's DN properly. + + + + +### Generate an OAuth token using LDAP + + +The configuration below will take a request that is posted to TIB and authenticate it against LDAP; if the request is valid, it will redirect to the Tyk Gateway OAuth clients' `Redirect URI` with the token as a URL fragment: + +```{.copyWrapper} +{ + "ActionType": "GenerateOAuthTokenForClient", + "ID": "6", + "IdentityHandlerConfig": { + "DashboardCredential": "{DASHBOARD-API-ID}", + "DisableOneTokenPerAPI": false, + "OAuth": { + "APIListenPath": "{API-LISTEN-PATH}", + "BaseAPIID": "{BASE-API-ID}", + "ClientId": "{TYK-OAUTH-CLIENT-ID}", + "RedirectURI": "http://{APP-DOMAIN}:{PORT}/{AUTH-SUCCESS-PATH}", + "ResponseType": "token", + "Secret": "{TYK-OAUTH-CLIENT-SECRET}" + } + }, + "MatchedPolicyID": "POLICY-ID", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "FailureRedirect": "http://{APP-DOMAIN}:{PORT}/failure", + "LDAPAttributes": [], + "LDAPPort": "389", + "LDAPServer": "localhost", + "LDAPUserDN": "cn=*USERNAME*,cn=dashboard,ou=Group,dc=ldap,dc=tyk-ldap-test,dc=com" + }, + "ProviderName": "ADProvider", + "ReturnURL": "", + "Type": "passthrough" +} +``` + +This configuration is useful for internal APIs that require valid OAuth tokens (e.g. a webapp or mobile app) but need validation by an LDAP provider. 
+ +### Log into the Developer Portal using LDAP + + +LDAP requires little configuration; we can use the same provider configuration that we used to log into the Dashboard to target the Portal instead - notice the change in the handler configuration and the return URL: + +```{.copyWrapper} +{ + "ActionType": "GenerateOrLoginDeveloperProfile", + "ID": "5", + "IdentityHandlerConfig": { + "DashboardCredential": "822f2b1c75dc4a4a522944caa757976a" + }, + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "FailureRedirect": "http://{PORTAL-DOMAIN}:{PORTAL-PORT}/portal/login/", + "LDAPAttributes": [], + "LDAPPort": "389", + "LDAPServer": "localhost", + "LDAPUserDN": "cn=*USERNAME*,cn=dashboard,ou=Group,dc=test-ldap,dc=tyk,dc=io" + }, + "ProviderConstraints": { + "Domain": "", + "Group": "" + }, + "ProviderName": "ADProvider", + "ReturnURL": "http://{PORTAL-DOMAIN}:{PORTAL-PORT}/portal/sso/", + "Type": "passthrough" +} +``` + +Once again, a simple `POST` request is all that is needed to validate a user via an LDAP provider. + +### Using advanced LDAP search + +In some cases validation of a user CN is not enough, and it requires verifying if a user matches some specific rules, like internal team ID. In this case TIB provides support for doing an additional LDAP search check, and if the result of this search returns only one record, it will pass the user. + +To make it work you need to specify 3 additional attributes in the profile configuration file: + +* `LDAPBaseDN` - base DN used for doing LDAP search, for example `cn=dashboard,ou=Group` +* `LDAPFilter` - filter applied to the search, should include the `*USERNAME*` variable. For example: `(&(objectCategory=person)(objectClass=user)(cn=*USERNAME*))` +* `LDAPSearchScope` - This specifies the portion of the target subtree that should be considered. 
Supported search scope values include: 0 - baseObject (often referred to as "base"), 1 - singleLevel (often referred to as "one"), 2 - wholeSubtree (often referred to as "sub") + +For additional information, see the [LDAP search operation](https://www.ldap.com/the-ldap-search-operation) documentation. + +Example profile using LDAP search filters: +```{.copyWrapper} +{ + "ActionType": "GenerateOAuthTokenForClient", + "ID": "2", + "IdentityHandlerConfig": { + "DashboardCredential": "ADVANCED-API-USER-API-TOKEN", + "DisableOneTokenPerAPI": false, + "OAuth": { + "APIListenPath": "oauth-1", + "BaseAPIID": "API-To-GRANT-ACCESS-TO", + "ClientId": "TYK-OAUTH-CLIENT-ID", + "RedirectURI": "http://your-app-domain.com/target-for-fragment", + "ResponseType": "token", + "Secret": "TYK-OAUTH-CLIENT-SECRET" + } + }, + "MatchedPolicyID": "POLICY-TO-ATTACH-TO-KEY", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "FailureRedirect": "http://yourdomain.com/failure-url", + "LDAPAttributes": [], + "LDAPBaseDN": "cn=dashboard,ou=Group,dc=ldap,dc=tyk-test,dc=com", + "LDAPEmailAttribute": "mail", + "LDAPSearchScope": 2, + "LDAPFilter": "(&(objectcategory=user)(sAMAccountName=*USERNAME*)(memberOf=CN=RL - PAT - T1-00002,OU=Role,OU=Security Roles,DC=company,DC=net))", + "LDAPPort": "389", + "LDAPServer": "ldap.company.com", + "LDAPUserDN": "*USERNAME*@company.com" + }, + "ProviderName": "ADProvider", + "ReturnURL": "", + "Type": "passthrough" +} +``` + + diff --git a/api-management/single-sign-on-oidc.mdx b/api-management/single-sign-on-oidc.mdx new file mode 100644 index 000000000..14f4f7ccc --- /dev/null +++ b/api-management/single-sign-on-oidc.mdx @@ -0,0 +1,488 @@ +--- +title: "Single Sign On (SSO) with OpenID Connect (OIDC)" +description: "Learn how to integrate external services with Tyk API Gateway. Discover how to use middleware plugins, webhooks, and service discovery to extend your API functionality and connect with third-party systems." 
+keywords: "Tyk Identity Broker, TIB, Identity Provider, Identity Handler, SSO, Custom Authentication, Custom Proxy Provider, SAML, OIDC, OpenID Connect, Profiles, IDPs, Social Provider, LDAP" +sidebarTitle: "SSO with OpenID Connect" +--- + +## Overview + +Single Sign-On (SSO) with OpenID Connect (OIDC) allows Tyk Dashboard or Developer Portal users to authenticate using their existing identity provider credentials, creating a seamless login experience. This integration leverages Tyk Identity Broker (TIB), which acts as a bridge between Tyk and various identity providers such as Auth0, Keycloak, and other OIDC-compatible systems. + 
+ + + +To activate SSO on the Dashboard or Developer portal, there’s no requirement to install TIB separately; it is integrated into the Dashboard and Developer Portal. You have two configurations for SSO within the dashboard: +1. **Using Embedded TIB**: No need to install it separately. +2. **Using External TIB**: If you are using a previous version of the Dashboard or Portal, you can still use SSO with TIB installed as a separate application. + + + +### Basic Implementation Steps + +To set up SSO with OIDC in Tyk: + +1. Access the Identity Manager under System Management in the Tyk Dashboard +2. Create a profile for your preferred IDP +3. Get the `client_id` + `secret` that are defined on your IDP +4. Set the `Callback URL` generated by Tyk on your IDP +5. Provide your SSO profile in Tyk with the `Discover URL (well known endpoint)` +6. Visit the Login URL after saving your profile to initialize the login +7. More Docs for the flow can be found on our [GitHub TIB repo README](https://github.com/TykTechnologies/tyk-identity-broker) and our [3rd Party integration docs](/api-management/external-service-integration) + + +{/* TODO: Add info about SSO and OIDC */} + +{/* TODO: Add some info and update grid */} + +## SSO with Azure Active Directory (AD) + +This is an end-to-end worked example of how you can use [AzureAD](https://www.microsoft.com/en-gb/security/business/identity-access/microsoft-entra-id) and our [Tyk Identity Broker (TIB)](https://tyk.io/docs/concepts/tyk-components/identity-broker/ +) to log in to your Dashboard. +This guide assumes the following: + +You already have authorized access to Tyk's Dashboard. If you haven't, get the authorization key by following this [guide](/api-management/user-management#using-dashboard-api). + +### Configuration at Azure + +1. Access your Azure Portal and navigate to the Azure Active Directory page. + +2. Go to app registrations and create or access an application you want to use for Dashboard access. 
+ + - If you are creating an application, give it a name and register it + +3. Add a redirect URL to your application as a callback to TIB in your Azure application: + + - In your app, either via the Authentication menu or the redirect URL shortcut navigate to and add the redirect to TIB in the Web category i.e. `http://localhost:3000/auth/{PROFILE-NAME-IN-TIB}/openid-connect/callback`. + + Redirect URL + +4. Go to Overview and add a secret in Client Credentials. Don't forget to copy the secret value, not the secretID. + + Overview + +Check Microsoft's [documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) for more detail. + +### Configuration at Dashboard + +1. Log in to your dashboard and select Identity Management, located under System Management +2. Create a profile and select OpenID Connect as the provider type +3. Under Profile Configuration, paste the secret value, clientID, and well-known endpoint URL from the Azure site. + - Profile Configuration may look something like this: + + Profile Configuration + + - The well-known endpoint URL is created by Azure and can be located by selecting Endpoints on their site + + Endpoints + +### Test your Azure Login: + +From the browser call `http://localhost:3000/auth/{PROFILE-NAME-IN-TIB}/openid-connect` +- If it's working you'll be redirected to Azure's web page and asked for your username and password. + + Username + + Password + +- Once you are successfully authenticated, you'll be redirected to the Tyk Dashboard. + + Dashboard + +### Enhancements + +Once it's working you can also add more enhancements such as automatic user group mapping from your AzureAD security groups or user groups to Tyk Dashboard groups. + +#### User group mapping + +Group mapping can be managed from Advanced Settings section of the Profile Configuration screen. 
+ +Profile Configuration - Additional Options + +As illustrated in the screen below the following information must be provided: + +- Identity provider role +- Tyk User Group: This can be created from the User Groups section of the dashboard (reference a link to a page in tyk docs here to show how to create a user group). When creating your User Group, one can also select and adjust the permissions for each group. + +For more information on how to set and change user permissions, head to this [guide](/api-management/user-management#using-dashboard-ui-1) + +Profile Configuration - Raw-editor + +You can select the scopes you would like your request to include. By default, Tyk will provide the connectid scope, anything additional must be requested. + +### OpenID Connect Example + +For debugging purposes, you can find an example we created using the OpenID Connect playground. +1. Add the redirect url found on the OpenID Connect site to the redirect urls found under the Web section + + Access redirect urls + + Additional URL Added + +2. Copy the OpenID Connect endpoint from the Azure site +3. On the OpenID Connect site select Edit. In the Server Template dropdown menu select the Custom option and paste the endpoint in the Discovery Document URL. + + Edit Button + + Custom Dropdown + +4. Press the Use Discovery Document button and this will autofill Authorization Token Endpoint, Token Endpoint, and Token Keys Endpoint + + Discovery Document + +5. Copy and paste the Client ID and Client Secret from the Azure site to your ConnectID. Scope is autofilled for you and save the configuration. + + Client ID and Secret +6. Press start at the bottom of the Request window and if done correctly, this should prompt you to sign in to your Azure account. + + OpenID Connect Step 2 +7. You should then be redirected back to OpenID Connect where you'll be shown the Exchange Code. This needs to be turned into an access token. Press the exchange button under the request and then press Next. 
+ + OpenID Connect Step 3 + OpenID Connect Step 4 +8. We can then verify this by pressing the verify button. We can also view the information or scope of what is being returned by heading to jwt.io and viewing the payload: data there. + + OpenID Connect Step 5 +9. We are given an object with key, value pairs and we can pass in the key ie. name to our Custom User Group and the value of to our Identity Provider Role in our Tyk dashboard as shown in the example above. + + OpenID Connect Step 6 + +To try this yourself, we have included the link: https://openidconnect.net/ + +## SSO with Okta + +This is an end-to-end worked example of how you can use [Okta](https://www.okta.com/) and the Tyk Identity Broker to log into your Dashboard. +This guide assumes the following: + +* You already have authorized access to Tyk's Dashboard. If you haven't, [get the authorization key by following this doc](/api-management/user-management#using-dashboard-api). +* For simplicity, you are running TIB locally on port 3010 +* You are able to edit TIB's configuration file. + + +### Configuration at Okta + +1. Create a developer account on the [Okta Developer site](https://developer.okta.com/). + You'll get a domain such as `https://.okta.com/.well-known/openid-configuration` +2. Login and create a Web Application as follows: + - Under `Application`, click `Add Application` + - Choose `Web` + - Change the name of the app + - Tick `Authorization Code` + - Click `Done` + + Note: These instruction are for the new Okta's `Developer Console`, for the `Classic UI` instructions are slightly different. + + +3. Add a callback to TIB in your application: + - Under `General`, click `Edit` and update the `Login redirect URIs` field with the endpoint on TIB `http://localhost:3010/auth/{PROFILE-NAME-IN-TIB}/openid-connect/callback`. + - `{PROFILE-NAME-IN-TIB}` - this can be any string you choose, as long as you use the same one for the profile in TIB. + +4. 
Permissions to login via Okta:
+   Under the `Assignments` tab, make sure group assignments are set to *everyone* (for now, you will change this later!).
+
+5. This is how it should look after step #4
+okta-create-app
+
+### Configuration at TIB
+
+6. Set the profile in `profiles.json` as follows:
+   - Copy from your Okta client the `client ID` to `ProviderConfig.UseProviders[].key`
+   - Copy from your Okta client the `Client secret` to `ProviderConfig.UseProviders[].secret`
+   - Add Okta's discovery URL `"https://.okta.com/.well-known/openid-configuration"` to `ProviderConfig.UseProviders[].DiscoverURL`
+
+   Example of a `profiles.json` file:
+```{.json}
+[{
+  "ActionType": "GenerateOrLoginUserProfile",
+  "ID": "{PROFILE-NAME-IN-TIB}",
+  "OrgID": "5a54a74550200d0001975584",
+  "IdentityHandlerConfig": {
+      "DashboardCredential": "{DASHBOARD-SECRET}"
+  },
+  "ProviderConfig": {
+      "CallbackBaseURL": "http://{TIB-DOMAIN}:{TIB-PORT}",
+      "FailureRedirect": "http://{DASHBOARD-DOMAIN}:{DASHBOARD-PORT}/?fail=true",
+      "UseProviders": [
+          {
+              "Key": "{Okta-App-Client-ID}",
+              "Secret": "{Okta-App-Client-SECRET}",
+              "Scopes": ["openid", "email"],
+              "DiscoverURL": "https://.okta.com/.well-known/openid-configuration",
+              "Name": "openid-connect"
+          }
+      ]
+  },
+  "ProviderName": "SocialProvider",
+  "ReturnURL": "http://{DASHBOARD-DOMAIN}:{DASHBOARD-PORT}/tap",
+  "Type": "redirect"
+}]
+```
+
+7. Start TIB by running the binary (`profiles.json` is in the same CWD)
+   See [Install TIB](/api-management/external-service-integration) for detailed instructions on how to install TIB
+8. Test that it works:
+   From the browser call `http://localhost:3010/auth/{PROFILE-NAME-IN-TIB}/openid-connect`
+   - If it's working you'll be redirected to Okta's web page and will be asked to enter your Okta user name and password.
+   - If you were successfully authenticated by Okta then you'll be redirected to the Tyk Dashboard and logged into it without going through the login page. Job's done!
+9. 
If you need to update your profile then you can use TIB's REST API as follows: + +```{.copyWrapper} +curl http://{TIB-DOMAIN}:{TIB-PORT}/api/profiles/{PROFILE-NAME-IN-TIB} -H "Authorization: {MY-SECRET}" -H "Content-type: application/json" -X PUT --data "@./my-new-dashboard-profile.json" | prettyjson +``` + + - POST and DELETE calls apply as normal + - You can post a few profiles to TIB. + - See [TIB REST API](/tyk-identity-broker/tib-rest-api) for more details. + +### Understanding the flow + 1. The initial call to the endpoint on TIB was redirected to Okta + 2. Okta identified the user + 3. Okta redirected the call back to TIB endpoint (according to the callback you set up on the client earlier in step 3) and from TIB + 4. TIB, via REST API call to the dashboard, created a nonce and a special session attached to it. + 5. TIB redirected the call to the dashboard to a special endpoint `/tap` ( it was defined on the profile under `ReturnURL`) with the nonce that was created. + 6. The Dashboard on the `/tap` endpoint finds the session that is attached to the `nonce`, login the user and redirect to the dashboard first page + +### Enabling MFA and SSO + +Once it's working you can also add two more enhancements - SSO and MFA + +#### SSO login into the Dashboard via a login page + You will need to: + - set up a web server with a login page and a form for `user` and `password` + - Update `tyk_analytics.conf` to redirect logins to that url + Explicit details are in [steps 6-7](/api-management/single-sign-on-ldap#create-login-page) + +#### Multi-Factor-Authentication (MFA) Support + MFA works out-of-the-box in Tyk since luckily Okta supports it. you would need to add it to the configuration of the account holder. Under `Security --> Multifactor --> Factor types` you can choose the types you want. For instance I chose Google Authenticator. + + 1. 
While trying to login to the Dashboard, Okta enforced the MFA and asked me to use the Google Authenticator: + okta-mfa-setup-1 + + 2. I had to download the Google Authenticator and identify with the generated code + okta-mfa-download-google-authenticator-2 + 3. I successfully authenticated with Google Authenticator + okta-mfa-google-auth-approved-3 + +### Common Error +If you get a `400 Bad Request` it means the profile name in the login endpoint is not identical to the profile name in the callback that you set up on Okta's app: + +- On Okta's app - `Login redirect URIs:` `http://localhost:3010/auth/{PROFILE-NAME-IN-TIB}/openid-connect/callback`. +- The endpoint to test - `http://localhost:3010/auth/{PROFILE-NAME-IN-TIB}/openid-connect` + +okta-bad-request-wrong-callback + +## SSO with Auth0 + +This will walk you through securing access to your Tyk Dashboard using OpenID Connect (OIDC) identity tokens with Auth0. We also have the following video that will walk you through the process. + + + +**Prerequisites** + +* A free account with [Auth0](https://auth0.com/) +* A Tyk Self-Managed or Cloud installation +* Our Tyk Identity Broker (TIB). You can use the internal version included with a Tyk Self-Managed installation and Tyk Cloud, or an external version. See [Tyk Identity Broker](/api-management/external-service-integration#what-is-tyk-identity-broker-tib) for more details. + +### Create a new user in Auth0 + +1. Log in to your Auth0 account. +2. Select **Users** from the **User Management** menu. + +Auth0 Create User + +3. Click Create User and complete the new user form, using the default **Username-Password-Authentication** Connection method. +4. Click Create to save your new user. +Auth0 User profile + +### Create an Auth0 application + +You will use settings from your Auth0 application within the Tyk Dashboard Identity profile you will create. + +1. Select Applications from the Auth0 menu. +Auth0 Applications +2. Click **Create Application**. +3. 
Give your application a name and select **Regular Web Application** from the application types.
+Auth0 Application information
+4. Click **Create**.
+5. After your application has been created, select the **Basic Information** tab.
+Auth0 Application Basic information
+6. You will use the **Domain**, **Client Id** and **Client Secret** values in the Identity profile you create next in the Tyk Dashboard.
+
+### Create an Identity Management profile in your Dashboard
+
+1. Log in to your Tyk Dashboard as an Admin user.
+2. Select **Identity Management** from the **System Management** menu.
+Create Identity profile
+3. Click **Create Profile**.
+4. In the **Profile action** section enter a name for your profile and make sure the **Login to Tyk Dashboard** option is selected.
+Identity Profile action settings
+5. Click Next. In the **Provider type** section, select **OpenID Connect**.
+Identity profile Provider type
+6. Click Next. Copy the **Client ID** value from your **Auth0 application** > **Basic Information** and paste it in the **Client ID / Key** field.
+7. Copy the **Client Secret** value from your **Auth0 application** > **Basic Information** and paste it in the **Secret** field.
+8. You need to add a **Discover URL (well known endpoint)**. Use the following URL, replacing `<>` with the **Domain** value from your **Auth0 application** > **Basic Information**.
+
+   `https://<>/.well-known/openid-configuration`
+
+   Tyk new identity profile configuration
+
+9. Copy the **Callback URL** and paste it into the **Allowed Callback URLs** field in your **Auth0 application** > **Basic Information**.
+Auth0 Allowed Callback URLs
+10. Click **Save Changes** to update your Auth0 Application.
+11. Click **Create Profile** to save your Identity profile in your Tyk Dashboard.
+
+### Test your Auth0 Login
+
+1. From your **Identity Management Profiles** click the profile you created to open it.
+Tyk Identity Profiles
+2. Click the **Login URL**.
+Tyk Identity Profile Config
+3. 
You will now see the Auth0 login form in a browser tab. +Auth0 login form +4. Enter the email address and password of your Auth0 user. +5. You may be asked to authorize your Auth0 application. +Accept Auth0 application +6. Click **Accept**. +7. You will now be taken to the Tyk Dashboard. +Tyk Dashboard from Auth0 SSO login + +## SSO with Keycloak + +This is a walk-through of how you can use [Keycloak](https://www.keycloak.org) and our (internal/embedded) Tyk Identity Broker (TIB) to log in to your Dashboard. This guide assumes you have existing Keycloak and Tyk Pro Environments. + +### Configuration at KeyCloak + +1. In your desired Realm, create a client of OpenID Connect type, and set your desired Client ID. + + Create Client + + Set Client Type and ID + + +2. Enable client authentication, then save the client. + + Enable Client Auth + + +3. Retrieve the Secret (from the credentials tab) of the Client you just created. You will need the Client ID and Secret in later steps. + + Retrieve Client Secret + +4. Retrieve the discovery endpoint of the realm, `https:///.well-known/openid-configuration`. + + This is accessible from β€œRealm Settings” > β€œGeneral” Tab > OpenID Endpoint Configuration. You will need it in later steps. + + Keycloak discovery endpoint + + +### Configuration at Dashboard + +1. Log in to your Dashboard and select Identity Management, located under System Management + + Select Identity Management + + +2. Create a profile, give it a name and select β€œLogin to Tyk Dashboard” + + Create a profile + + +3. Set the provider type as β€œOpenID Connect” + + OpenID Connect provider type + + +4. Fill in the Client ID, Client Secret and Discovery URL/endpoint from Keycloak (from steps 3 and 4 in Keycloak's Side) + +5. Copy the callback URL from Tyk and then you can click "Create Profile" to save the profile. + + Copy callback URL + + +6. 
Go to Keycloak, and paste the callback URL you just copied to "Valid redirect URIs" in the Keycloak Client, and then save the client.
+
+   This can be accessed by selecting the "Settings" tab when viewing a Keycloak client.
+
+   Add Redirect URL to keycloak client
+
+
+### Test Keycloak Login
+
+1. From your **Identity Management Profiles** click the profile you created to open it.
+
+2. Copy the **Login URL** and paste it into a browser tab
+   Copy login url
+
+3. You will now see the Keycloak login form.
+   Login to keycloak
+
+4. Enter the email address and password of your Keycloak user.
+
+5. You should now be redirected to the Tyk Dashboard and logged in
+   Tyk Dashboard from Keycloak SSO login
+
+
+## JSON Web Encryption with OIDC
+
+**Prerequisites**
+
+- Tyk Identity Broker v1.6.1+ or Tyk Dashboard v5.7.0+ (JWE feature is available from these versions and in all subsequent releases).
+- An Identity Provider (IdP) that supports JSON Web Encryption (JWE)
+- A certificate with a private key for Tyk (used to decrypt the ID token)
+- A public key file for the IdP (used to encrypt the ID token)
+
+### Steps for Configuration
+
+1. **Prepare Encryption Keys**
+
+   - Load the certificate with the private key into Tyk:
+     - **For embedded TIB in Dashboard:** Use Tyk Dashboard's certificate manager. In the image below you can see the module in the Dashboard that allows you to upload certificates:
+     Certificate manager
+     - **For standalone TIB:** Store the certificate as a file accessible to Tyk
+
+   - Load the public key into your IdP for ID token encryption (process varies by IdP)
+
+2. **Configure the Identity Provider**
+   - Create a new client in your IdP for Tyk Identity Broker
+
+3. 
**Set up OIDC Profile**
+
+   - Create a new [TIB profile](/api-management/external-service-integration#exploring-tib-profiles):
+     - Select Social > OIDC as the provider
+     - Enter the client key and client secret from the IdP
+     - Copy the callback URL from TIB and add it to the IdP client's allowed redirect URLs
+     Profile creation
+   - Test the basic SSO flow to ensure it's working correctly
+
+4. **Enable JWE**
+
+   - [Update the TIB profile via API](/tyk-identity-broker/tib-rest-api#update-profile)
+   - Add the following fields to the `ProviderConfig` section:
+
+     ```json
+      ...
+     "ProviderConfig": {
+       "JWE": {
+         "Enabled": true,
+         "PrivateKeyLocation": "CERT-ID"
+       },
+     ...
+     ```
+
+   - Set `PrivateKeyLocation` to either:
+     - The certificate ID from the certificate manager, or
+     - The file path where the certificate and private key are stored
+
+   - Update the IdP client configuration
+     - Enable JWE for the client
+     - Provide the public key for encryption
+
+5. **Verification**
+   - Test the complete flow with JWE enabled to ensure proper functionality.
+
+### Troubleshooting
+While setting up JWE with Tyk Identity Broker, you may encounter some challenges. This section outlines common issues and their solutions to help you navigate the implementation process smoothly.
+
+1. **oauth2: error decoding JWT token: jws: invalid token received, not all parts available** This means that JWE is not enabled in the profile and the IDP is already using JWE.
+2. **JWE Private Key not loaded** Tyk encountered some issues while loading the certificate with the private key. Ensure that the path or certId is correct.
\ No newline at end of file
diff --git a/api-management/single-sign-on-saml.mdx b/api-management/single-sign-on-saml.mdx
new file mode 100644
index 000000000..940300c10
--- /dev/null
+++ b/api-management/single-sign-on-saml.mdx
@@ -0,0 +1,65 @@
+---
+title: "Single Sign On (SSO) with SAML"
+description: "Learn how to integrate external services with Tyk API Gateway. 
Discover how to use middleware plugins, webhooks, and service discovery to extend your API functionality and connect with third-party systems."
+keywords: "Tyk Identity Broker, TIB, Identity Provider, Identity Handler, SSO, Custom Authentication, Custom Proxy Provider, SAML, OIDC, OpenID Connect, Profiles, IDPs, Social Provider, LDAP"
+sidebarTitle: "SSO with SAML"
+---
+
+## SSO with SAML
+
+SAML authentication is a way for a service provider, such as the Tyk Dashboard or Portal, to assert the Identity of a User via a third party.
+
+Tyk Identity Broker can act as the go-between for the Tyk Dashboard and Portal and a third party identity provider. Tyk Identity Broker can also interpret and pass along information about the user who is logging in such as Name, Email and group or role metadata for enforcing role-based access control in the Tyk Dashboard.
+
+The provider config for SAML has the following values that can be configured in a Profile:
+
+`SAMLBaseURL` - The host of TIB that will be used in the metadata document for the Service Provider. This will form part of the metadata URL used as the Entity ID by the IDP. The redirects configured in the IDP must match the expected Host and URI configured in the metadata document made available by Tyk Identity Broker.
+
+`FailureRedirect` - Where to redirect failed login requests.
+
+`IDPMetaDataURL` - The metadata URL of your IDP which will provide Tyk Identity Broker with information about the IDP such as EntityID, Endpoints (Single Sign On Service Endpoint, Single Logout Service Endpoint), its public X.509 cert, NameId Format, Organization info and Contact info.
+
+This metadata XML can be signed providing a public X.509 cert and the private key.
+
+`CertLocation`: An X.509 certificate and the private key for signing your requests to the IDP; this should be one single file with the cert and key concatenated. 
When using the internal identity broker, this value should be the ID of the certificate uploaded via the certificate manager in the Dashboard, otherwise it should be a path where the certificate is placed.
+
+`ForceAuthentication` - Ignore any session held by the IDP and force re-login every request.
+
+`SAMLEmailClaim` - Key for looking up the email claim in the SAML assertion from the IDP. Defaults to: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress`
+
+`SAMLForenameClaim` - Key for looking up the forename claim in the SAML assertion from the IDP. Defaults to: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/forename`
+
+`SAMLSurnameClaim` - Key for looking up the surname claim in the SAML assertion from the IDP. Defaults to: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname`
+
+Example profile configuration:
+
+```
+{
+  "ActionType": "GenerateOrLoginUserProfile",
+  "ID": "saml-sso-login",
+  "OrgID": "{YOUR_ORGANIZATION_ID}",
+  "CustomEmailField": "",
+  "IdentityHandlerConfig": {
+    "DashboardCredential": "{DASHBOARD_USER_API_KEY}"
+  },
+  "ProviderConfig": {
+    "SAMLBaseURL": "https://{HOST}",
+    "FailureRedirect": "http://{DASHBOARD_HOST}:{PORT}/?fail=true",
+    "IDPMetaDataURL": "{IDP_METADATA_URL}",
+    "CertLocation":"myservice.cert",
+    "ForceAuthentication": false,
+    "SAMLEmailClaim": "",
+    "SAMLForenameClaim": "",
+    "SAMLSurnameClaim": ""
+  },
+  "ProviderName": "SAMLProvider",
+  "ReturnURL": "http://{DASHBOARD_URL}:{PORT}/tap",
+  "Type": "redirect"
+}
+```
+### Video Demonstration
+
+We have a video that walks you through getting Tyk Dashboard SSO Access via SAML using Microsoft Azure as IDP and our internal Dashboard TIB. 
+
+
+
diff --git a/api-management/single-sign-on-social-idp.mdx b/api-management/single-sign-on-social-idp.mdx
new file mode 100644
index 000000000..6bad71349
--- /dev/null
+++ b/api-management/single-sign-on-social-idp.mdx
@@ -0,0 +1,154 @@
+---
+title: "SSO with Social Identity Providers"
+description: "Learn how to integrate external services with Tyk API Gateway. Discover how to use middleware plugins, webhooks, and service discovery to extend your API functionality and connect with third-party systems."
+keywords: "Tyk Identity Broker, TIB, Identity Provider, Identity Handler, SSO, Custom Authentication, Custom Proxy Provider, SAML, OIDC, OpenID Connect, Profiles, IDPs, Social Provider, LDAP"
+sidebarTitle: "SSO with Social IDP"
+---
+
+## Introduction
+
+The social provider for the Tyk Identity Broker is a thin wrapper around the excellent `goth` social auth library, modified slightly to work with a multi-tenant structure. The social provider should provide seamless integration with:
+
+* Bitbucket
+* Digital Ocean
+* Dropbox
+* GitHub
+* Google+
+* LinkedIn
+* Twitter
+* Salesforce
+
+The social provider is ideal for SSO-style logins for the Dashboard or for the Portal. For certain providers (mainly Google+), where email addresses are returned as part of the user data, a constraint can be added to validate the user's domain. This is useful for Google For Business Apps users that want to grant access to their domain users for the Dashboard.
+
+For more social provider examples see the Tyk Identity Broker (TIB) v0.2 Repo [Readme](https://github.com/TykTechnologies/tyk-identity-broker/blob/master/README.md#social).
+
+ + + +To activate SSO on the Dashboard or Developer portal, there’s no requirement to install TIB separately; it is integrated into the Dashboard and Developer Portal. You have two configurations for SSO within the dashboard: +1. **Using Embedded TIB**: No need to install it separately. +2. **Using External TIB**: If you are using a previous version of the Dashboard or Portal, you can still use SSO with TIB installed as a separate application. + + + + +## Log into an APP with Github OAuth + + + +## Log into an APP with Google (Oauth) + +A common use case for Tyk Gateway users is to enable users to log into a web app or mobile app using a social provider such as Google, but have that user use a token in the app that is time-delimited and issued by their own API (or in this case, Tyk). + +Tyk can act as an OAuth provider, but requires some glue code to work, in particular, generating a token based on the authentication of a third party, which needs to run on a server hosted by the owner of the application. This is not ideal in many scenarios where authentication has been delegated to a third-party provider (such as Google or Github). + +In this case, we can enable this flow with Tyk Gateway by Using TIB. + +What the broker will do is essentially the final leg of the authentication process without any new code, simply sending the user via TIB to the provider will suffice for them to be granted an OAuth token once they have authenticated in a standard, expected OAuth pattern. + +Assuming we have created a client ID and secret in Google Apps to grant ourselves access to the users data, we need those details, and some additional ones from Tyk itself. + +### To Set up an OAuth client with Google Apps + +1. Go to the [Google Developer Console](https://console.developers.google.com/) and create a new app +2. Register a new OAuth client. Let's call it WebApp 1 (Select "New Credentials -> OAuth Client ID") +3. Select Web App +4. 
Add the following URL (modify for your domain) to the "Authorized redirect URIs" section: `http://tib-hostname:TIB-PORT/auth/{PROFILE-ID}/gplus/callback` + +### Create an OAuth Client in Tyk Dashboard + +TIB will use the OAuth credentials for GPlus to access and authenticate the user, it will then use another set of client credentials to make the request to Tyk to generate a token response and redirect the user, this means we need to create an OAuth client in Tyk Dashboard before we can proceed. + +One quirk with the Tyk API is that requests for tokens go via the base APIs listen path (`{listen_path}/toauth/authorize`), so we will need to know the listen path and ID of this API so TIB can make the correct API calls on your behalf. + +```{.copyWrapper} +{ + "ActionType": "GenerateOAuthTokenForClient", + "ID": "3", + "IdentityHandlerConfig": { + "DashboardCredential": "{DASHBOARD-API-ID}", + "DisableOneTokenPerAPI": false, + "OAuth": { + "APIListenPath": "{API-LISTEN-PATH}", + "BaseAPIID": "{BASE-API-ID}", + "ClientId": "{TYK-OAUTH-CLIENT-ID}", + "RedirectURI": "http://{APP-DOMAIN}:{PORT}/{AUTH-SUCCESS-PATH}", + "ResponseType": "token", + "Secret": "{TYK-OAUTH-CLIENT-SECRET}" + } + }, + "MatchedPolicyID": "567a86f630c55e3256000003", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB-DOMAIN}:{TIB-PORT}", + "FailureRedirect": "http://{PORTAL-DOMAIN}:{PORTAL-PORT}/portal/login/?fail=true", + "UseProviders": [{ + "Key": "GOOGLE-OAUTH-CLIENT-KEY", + "Name": "gplus", + "Secret": "GOOGLE-OAUTH-CLIENT-SECRET" + }] + }, + "ProviderConstraints": { + "Domain": "", + "Group": "" + }, + "ProviderName": "SocialProvider", + "ReturnURL": "", + "Type": "redirect" +} +``` + +There's a few new things here we need to take into account: + +* `APIListenPath`: This is the listen path of your API, TIB uses this to generate the OAuth token. 
+* `BaseAPIID`: The base API ID for the listen path mentioned earlier, this forms the basic access grant for the token (this will be superseded by the `MatchedPolicyID`, but is required for token generation). +* `ClientId`: The client ID for this profile within Tyk Gateway. +* `Secret`: The client secret for this profile in Tyk Gateway. +* `RedirectURI`: The Redirect URL set for this profile in the Tyk Gateway. +* `ResponseType`: This can be `token` or `authorization_code`, the first will generate a token directly, the second will generate an auth code for follow up access. For SPWA and Mobile Apps it is recommended to just use `token`. + +When TIB successfully authorizes the user, and generates the token using the relevant OAuth credentials, it will redirect the user to the relevant redirect with their token or auth code as a fragment in the URL for the app to decode and use as needed. + +There is a simplified flow, which does not require a corresponding OAuth client in Tyk Gateway, and can just generate a standard token with the same flow. 
+ +## Log into Dashboard with Google + +Similarly to logging into an app using Tyk, OAuth and Google Plus, if we have our callback URL and client IDs set up with Google, we can use the following profile setup to access our Dashboard using a social provider: + +```{.copyWrapper} +{ + "ActionType": "GenerateOrLoginUserProfile", + "ID": "2", + "IdentityHandlerConfig": null, + "MatchedPolicyID": "1C", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "CallbackBaseURL": "http://:{TIB-PORT}", + "FailureRedirect": "http://{DASH-DOMAIN}:{DASH-PORT}/?fail=true", + "UseProviders": [{ + "Name": "gplus", + "Key": "GOOGLE-OAUTH-CLIENT-KEY", + "Secret": "GOOGLE-OAUTH-CLIENT-SECRET" + }] + }, + "ProviderConstraints": { + "Domain": "yourdomain.com", + "Group": "" + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{DASH-DOMAIN}:{DASH-PORT}/tap", + "Type": "redirect" +} +``` + +The login to the Dashboard makes use of a one-time nonce to log the user in to the session. The nonce is only accessible for a few seconds. It is recommended that in production use, all of these transactions happen over SSL connections to avoid MITM snooping. + +`Domain` constraint ensures that only users from `yourdomain.com` domain-based email accounts are allowed to login. + Replace it with correct domain or remove this section if you don't want to set this constraint. + + +When TIB successfully authorizes the user, and generates the token using the relevant OAuth credentials, it will redirect the user to the relevant redirect with their token or auth code as a fragment in the URL for the app to decode and use as needed. + +There is a simplified flow, which does not require a corresponding OAuth client in Tyk Gateway, and can just generate a standard token with the same flow. 
+ + diff --git a/api-management/stream-config.mdx b/api-management/stream-config.mdx new file mode 100644 index 000000000..43951e5b7 --- /dev/null +++ b/api-management/stream-config.mdx @@ -0,0 +1,6720 @@ +--- +title: "Tyk Streams Configuration" +description: "How to configure Tyk Streams" +keywords: "Broker, Input, Output, HTTP Client, HTTP Server, Processors, Scanners, CSV, Lines, Regular Expression, Switch, Avro, Kafka" +sidebarTitle: "Tyk Streams Reference" +--- + +## Overview + +Tyk streams configuration is specified using YAML. The configuration consists of several main sections: *input*, *pipeline*, *output* and optionally *logger*. + +### Input + +The input section defines the publisher source of the data stream. Tyk Streams supports various input types such as Kafka, HTTP, MQTT etc. Each input type has specific configuration parameters. + +```yaml +input: + kafka: + addresses: + - localhost:9092 + topics: + - example_topic + consumer_group: example_group + client_id: example_client +``` + +### Pipeline + +The pipeline section defines the processing steps applied to the data. It includes processors for filtering, mapping, enriching and transforming the data. Processors can be chained together. + +```yaml +pipeline: + processors: + - mapping: | + root = this + root.foo = this.bar.uppercase() + - json_schema: + schema_path: "./schemas/example_schema.json" +``` + +### Output + +The output section specifies the destination of the processed data. Similar to inputs, Tyk Streams supports various output types like Kafka, HTTP etc. + +```yaml +output: + kafka: + addresses: + - localhost:9092 + topic: output_topic + client_id: example_output_client +``` + +### Logger (Optional) + +The logger section is used to configure logging options, such as log level and output format. 
+ +```yaml +logger: + level: INFO + format: json +``` + +## Inputs + +### Overview + +An input is a source of data piped through an array of optional [processors](/api-management/stream-config#overview-3): + +```yaml +input: + label: my_kafka_input + + kafka: + addresses: [ localhost:9092 ] + topics: [ foo, bar ] + consumer_group: foogroup + + # Optional list of processing steps + processors: + - avro: + operator: to_json +``` + +#### Brokering + +Only one input is configured at the root of a Tyk Streams config. However, the root input can be a [broker](/api-management/stream-config#broker) which combines multiple inputs and merges the streams: + +```yaml +input: + broker: + inputs: + - kafka: + addresses: [ localhost:9092 ] + topics: [ foo, bar ] + consumer_group: foogroup + + - http_client: + url: https://localhost:8085 + verb: GET + stream: + enabled: true +``` + +#### Labels + +Inputs have an optional field `label` that can uniquely identify them in observability data such as logs. + +{/* TODO + +When know if Tyk Streams will support metrics then link to metrics + +Inputs have an optional field `label` that can uniquely identify them in observability data such as metrics and logs. This can be useful when running configs with multiple inputs, otherwise their metrics labels will be generated based on their composition. For more information check out the [metrics documentation][metrics.about]. */} + +### Broker + +Allows you to combine multiple inputs into a single stream of data, where each input will be read in parallel. 
+ +#### Common + +```yml +# Common config fields, showing default values +input: + label: "" + broker: + inputs: [] # No default (required) + batching: + count: 0 + byte_size: 0 + period: "" + check: "" +``` + +#### Advanced + +```yml +# All config fields, showing default values +input: + label: "" + broker: + copies: 1 + inputs: [] # No default (required) + batching: + count: 0 + byte_size: 0 + period: "" + check: "" + processors: [] # No default (optional) +``` + +A broker type is configured with its own list of input configurations and a field to specify how many copies of the list of inputs should be created. + +Adding more input types allows you to combine streams from multiple sources into one. For example, reading from both RabbitMQ and Kafka: + +```yaml +input: + broker: + copies: 1 + inputs: + - amqp_0_9: + urls: + - amqp://guest:guest@localhost:5672/ + consumer_tag: tyk-consumer + queue: tyk-queue + + # Optional list of input specific processing steps + processors: + - mapping: | + root.message = this + root.meta.link_count = this.links.length() + root.user.age = this.user.age.number() + + - kafka: + addresses: + - localhost:9092 + client_id: tyk_kafka_input + consumer_group: tyk_consumer_group + topics: [ tyk_stream:0 ] +``` + +If the number of copies is greater than zero the list will be copied that number of times. For example, if your inputs were of type foo and bar, with 'copies' set to '2', you would end up with two 'foo' inputs and two 'bar' inputs. + +##### Batching + +It's possible to configure a [batch policy](/api-management/stream-config#batch-policy) with a broker using the `batching` fields. When doing this the feeds from all child inputs are combined. Some inputs do not support broker based batching and specify this in their documentation. + +##### Processors + +It is possible to configure processors at the broker level, where they will be applied to *all* child inputs, as well as on the individual child inputs. 
If you have processors at both the broker level *and* on child inputs then the broker processors will be applied *after* the child nodes processors. + +#### Fields + +##### copies + +Whatever is specified within `inputs` will be created this many times. + + +Type: `int` +Default: `1` + +##### inputs + +A list of inputs to create. + + +Type: `array` + +##### batching + +Allows you to configure a [batching policy](/api-management/stream-config#batch-policy). + + +Type: `object` + +```yml +# Examples + +batching: + byte_size: 5000 + count: 0 + period: 1s + +batching: + count: 10 + period: 1s + +batching: + check: this.contains("END BATCH") + count: 0 + period: 1m +``` + +##### batching.count + +A number of messages at which the batch should be flushed. If `0` disables count based batching. + + +Type: `int` +Default: `0` + +##### batching.byte_size + +An amount of bytes at which the batch should be flushed. If `0` disables size based batching. + + +Type: `int` +Default: `0` + +##### batching.period + +A period in which an incomplete batch should be flushed regardless of its size. + + +Type: `string` +Default: `""` + +```yml +# Examples + +period: 1s + +period: 1m + +period: 500ms +``` + +##### batching.check + +A Bloblang query that should return a boolean value indicating whether a message should end a batch. + + +Type: `string` +Default: `""` + +```yml +# Examples + +check: this.type == "end_of_transaction" +``` + +##### batching.processors + +A list of processors to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op. 
+ + +Type: `array` + +```yml +# Examples + +processors: + - archive: + format: concatenate + +processors: + - archive: + format: lines + +processors: + - archive: + format: json_array +``` + +### Http Client + +Connects to a server and continuously performs requests for a single message. + +#### Common + +```yml +# Common config fields, showing default values +input: + label: "" + http_client: + url: "" # No default (required) + verb: GET + headers: {} + timeout: 5s + payload: "" # No default (optional) + stream: + enabled: false + reconnect: true + auto_replay_nacks: true +``` + +#### Advanced + +```yml +# All config fields, showing default values +input: + label: "" + http_client: + url: "" # No default (required) + verb: GET + headers: {} + metadata: + include_prefixes: [] + include_patterns: [] + dump_request_log_level: "" + oauth: + enabled: false + consumer_key: "" + consumer_secret: "" + access_token: "" + access_token_secret: "" + oauth2: + enabled: false + client_key: "" + client_secret: "" + token_url: "" + scopes: [] + endpoint_params: {} + basic_auth: + enabled: false + username: "" + password: "" + jwt: + enabled: false + private_key_file: "" + signing_method: "" + claims: {} + headers: {} + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + extract_headers: + include_prefixes: [] + include_patterns: [] + timeout: 5s + retry_period: 1s + max_retry_backoff: 300s + retries: 3 + backoff_on: + - 429 + drop_on: [] + successful_on: [] + proxy_url: "" # No default (optional) + payload: "" # No default (optional) + drop_empty_bodies: true + stream: + enabled: false + reconnect: true + auto_replay_nacks: true +``` + +##### Streaming + +If you enable streaming then Tyk Streams will consume the body of the response as a continuous stream of data. This allows you to consume APIs that provide long lived streamed data feeds (such as Twitter). 
+ +##### Pagination + +This input supports interpolation functions in the `url` and `headers` fields where data from the previous successfully consumed message (if there was one) can be referenced. This can be used in order to support basic levels of pagination. + +#### Examples + +##### Basic Pagination + +Interpolation functions within the `url` and `headers` fields can be used to reference the previously consumed message, which allows simple pagination. + +```yaml +input: + http_client: + url: >- + http://api.example.com/search?query=allmyfoos&start_time=${! ( + (timestamp_unix()-300).ts_format("2006-01-02T15:04:05Z","UTC").escape_url_query() + ) }${! ("&next_token="+this.meta.next_token.not_null()) | "" } + verb: GET +``` + +{/* Update example when Tyk secrets Stream release has been performed + +```yaml +input: + http_client: + url: >- + http://api.example.com/search?query=allmyfoos&start_time=${! ( + (timestamp_unix()-300).ts_format("2006-01-02T15:04:05Z","UTC").escape_url_query() + ) }${! ("&next_token="+this.meta.next_token.not_null()) | "" } + verb: GET + rate_limit: foo_searches + # oauth2: + # enabled: true + # token_url: https://api.example.com/oauth2/token + # client_key: "${EXAMPLE_KEY}" + # client_secret: "${EXAMPLE_SECRET}" + +rate_limit_resources: + - label: foo_searches + local: + count: 1 + interval: 30s +``` */} + +#### Fields + +##### url + +The URL to connect to. + + +Type: `string` + +##### verb + +A verb to connect with + + +Type: `string` +Default: `"GET"` + +```yml +# Examples + +verb: POST + +verb: GET + +verb: DELETE +``` + +##### headers + +A map of headers to add to the request. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `object` +Default: `{}` + +```yml +# Examples + +headers: + Content-Type: application/octet-stream + traceparent: ${! 
tracing_span().traceparent } +``` + +##### metadata + +Specify optional matching rules to determine which metadata keys should be added to the HTTP request as headers. + + +Type: `object` + +##### metadata.include_prefixes + +Provide a list of explicit metadata key prefixes to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_prefixes: + - foo_ + - bar_ + +include_prefixes: + - kafka_ + +include_prefixes: + - content- +``` + +##### metadata.include_patterns + +Provide a list of explicit metadata key regular expression (re2) patterns to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_patterns: + - .* + +include_patterns: + - _timestamp_unix$ +``` + +##### dump_request_log_level + +Optionally set a level at which the request and response payload of each request made will be logged. + + +Type: `string` +Default: `""` +Options: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`, ``. + +##### oauth + +Allows you to specify open authentication via OAuth version 1. + + +Type: `object` + +##### oauth.enabled + +Whether to use OAuth version 1 in requests. + + +Type: `bool` +Default: `false` + +##### oauth.consumer_key + +A value used to identify the client to the service provider. + + +Type: `string` +Default: `""` + +##### oauth.consumer_secret + +A secret used to establish ownership of the consumer key. + + +Type: `string` +Default: `""` + +##### oauth.access_token + +A value used to gain access to the protected resources on behalf of the user. + + +Type: `string` +Default: `""` + +##### oauth.access_token_secret + +A secret provided in order to establish ownership of a given access token. + + +Type: `string` +Default: `""` + +##### oauth2 + +Allows you to specify open authentication via OAuth version 2 using the client credentials token flow. + + +Type: `object` + +##### oauth2.enabled + +Whether to use OAuth version 2 in requests. 
+ + +Type: `bool` +Default: `false` + +##### oauth2.client_key + +A value used to identify the client to the token provider. + + +Type: `string` +Default: `""` + +##### oauth2.client_secret + +A secret used to establish ownership of the client key. + + +Type: `string` +Default: `""` + +##### oauth2.token_url + +The URL of the token provider. + + +Type: `string` +Default: `""` + +##### oauth2.scopes + +A list of optional requested permissions. + + +Type: `array` +Default: `[]` + +##### oauth2.endpoint_params + +A list of optional endpoint parameters, values should be arrays of strings. + + +Type: `object` +Default: `{}` + +```yml +# Examples + +endpoint_params: + bar: + - woof + foo: + - meow + - quack +``` + +##### basic_auth + +Allows you to specify basic authentication. + + +Type: `object` + +##### basic_auth.enabled + +Whether to use basic authentication in requests. + + +Type: `bool` +Default: `false` + +##### basic_auth.username + +A username to authenticate as. + + +Type: `string` +Default: `""` + +##### basic_auth.password + +A password to authenticate with. + + +Type: `string` +Default: `""` + +##### jwt + +Allows you to specify JWT authentication. + + +Type: `object` + +##### jwt.enabled + +Whether to use JWT authentication in requests. + + +Type: `bool` +Default: `false` + +##### jwt.private_key_file + +A file with the PEM encoded via PKCS1 or PKCS8 as private key. + + +Type: `string` +Default: `""` + +##### jwt.signing_method + +A method used to sign the token such as RS256, RS384, RS512 or EdDSA. + + +Type: `string` +Default: `""` + +##### jwt.claims + +A value used to identify the claims that issued the JWT. + + +Type: `object` +Default: `{}` + +##### jwt.headers + +Add optional key/value headers to the JWT. + + +Type: `object` +Default: `{}` + +##### tls + +Custom TLS settings can be used to override system defaults. + + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. 
+ + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're seeing the error message `local error: tls: no renegotiation`. + + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, but not both. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +client_certs: + - cert: foo + key: bar + +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. 
+ + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in PKCS#1 or PKCS#8 format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an attacker recover the plaintext. + +Type: `string` +Default: `""` + +```yml +# Examples + +password: foo +``` + +##### extract_headers + +Specify which response headers should be added to resulting messages as metadata. Header keys are lowercased before matching, so ensure that your patterns target lowercased versions of the header keys that you expect. + + +Type: `object` + +##### extract_headers.include_prefixes + +Provide a list of explicit metadata key prefixes to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_prefixes: + - foo_ + - bar_ + +include_prefixes: + - kafka_ + +include_prefixes: + - content- +``` + +##### extract_headers.include_patterns + +Provide a list of explicit metadata key regular expression (re2) patterns to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_patterns: + - .* + +include_patterns: + - _timestamp_unix$ +``` + +##### timeout + +A static timeout to apply to requests. + + +Type: `string` +Default: `"5s"` + +##### retry_period + +The base period to wait between failed requests. + + +Type: `string` +Default: `"1s"` + +##### max_retry_backoff + +The maximum period to wait between failed requests. + + +Type: `string` +Default: `"300s"` + +##### retries + +The maximum number of retry attempts to make. + + +Type: `int` +Default: `3` + +##### backoff_on + +A list of status codes whereby the request should be considered to have failed and retries should be attempted, but the period between them should be increased gradually. 
+ + +Type: `array` +Default: `[429]` + +##### drop_on + +A list of status codes whereby the request should be considered to have failed but retries should not be attempted. This is useful for preventing wasted retries for requests that will never succeed. Note that with these status codes the *request* is dropped, but *message* that caused the request will not be dropped. + + +Type: `array` +Default: `[]` + +##### successful_on + +A list of status codes whereby the attempt should be considered successful, this is useful for dropping requests that return non-2XX codes indicating that the message has been dealt with, such as a 303 See Other or a 409 Conflict. All 2XX codes are considered successful unless they are present within `backoff_on` or `drop_on`, regardless of this field. + + +Type: `array` +Default: `[]` + +##### proxy_url + +An optional HTTP proxy URL. + + +Type: `string` + +##### payload + +An optional payload to deliver for each request. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + + +Type: `string` + +##### drop_empty_bodies + +Whether empty payloads received from the target server should be dropped. + + +Type: `bool` +Default: `true` + +##### stream + +Allows you to set streaming mode, where requests are kept open and messages are processed line-by-line. + + +Type: `object` + +##### stream.enabled + +Enables streaming mode. + + +Type: `bool` +Default: `false` + +##### stream.reconnect + +Sets whether to re-establish the connection once it is lost. + + +Type: `bool` +Default: `true` + + +##### auto_replay_nacks + +Whether messages that are rejected (nacked) at the output level should be automatically replayed indefinitely, eventually resulting in back pressure if the cause of the rejections is persistent. If set to `false` these messages will instead be deleted. 
Disabling auto replays can greatly improve memory efficiency of high throughput streams as the original shape of the data can be discarded immediately upon consumption and mutation. + + +Type: `bool` +Default: `true` + +### HTTP Server + +Receive messages POSTed over HTTP(S). HTTP 2.0 is supported when using TLS, which is enabled when key and cert files are specified. + +#### Common + +```yml +# Common config fields, showing default values +input: + label: "" + http_server: + address: "" + path: /post + ws_path: /post/ws + allowed_verbs: + - POST + timeout: 5s +``` + +#### Advanced + +```yml +# All config fields, showing default values +input: + label: "" + http_server: + address: "" + path: /post + ws_path: /post/ws + ws_welcome_message: "" + allowed_verbs: + - POST + timeout: 5s + cert_file: "" + key_file: "" + cors: + enabled: false + allowed_origins: [] + sync_response: + status: "200" + headers: + Content-Type: application/octet-stream + metadata_headers: + include_prefixes: [] + include_patterns: [] +``` + +{/* TODO add link to service wide HTTP server If the `address` config field is left blank the [service-wide HTTP server](/docs/components/http/about) will be used. */} + +{/* TODO add rate limit The field `rate_limit` allows you to specify an optional [`rate_limit` resource](/docs/components/rate_limits/about), which will be applied to each HTTP request made and each websocket payload received. + +When the rate limit is breached HTTP requests will have a 429 response returned with a Retry-After header. Websocket payloads will be dropped and an optional response payload will be sent as per `ws_rate_limit_message`. */} + +##### Responses + +{/* TODO describe how to use synchronous responses when avail: + +It's possible to return a response for each message received using synchronous responses. 
When doing so you can customise headers with the `sync_response` field `headers`, which can also use function interpolation in the value based on the response message contents. */} + + +##### Endpoints + +The following fields specify endpoints that are registered for sending messages, and support path parameters of the form `/{foo}`, which are added to ingested messages as metadata. A path ending in `/` will match against all extensions of that path: + +###### path (defaults to `/post`) + +This endpoint expects POST requests where the entire request body is consumed as a single message. + +If the request contains a multipart `content-type` header as per [rfc1341](https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html) then the multiple parts are consumed as a batch of messages, where each body part is a message of the batch. + +###### ws_path (defaults to `/post/ws`) + +Creates a websocket connection, where payloads received on the socket are passed through the pipeline as a batch of one message. + +Please note that components within a Tyk Streams config will register their respective endpoints in a non-deterministic order. This means that establishing precedence of endpoints that are registered via multiple `http_server` inputs or outputs (either within brokers or from cohabiting streams) is not possible in a predictable way. + +This ambiguity makes it difficult to ensure that paths which are both a subset of a path registered by a separate component, and end in a slash (`/`) and will therefore match against all extensions of that path, do not prevent the more specific path from matching against requests. + +It is therefore recommended that you ensure paths of separate components do not collide unless they are explicitly non-competing. 
+ +For example, if you were to deploy two separate `http_server` inputs, one with a path `/foo/` and the other with a path `/foo/bar`, it would not be possible to ensure that the path `/foo/` does not swallow requests made to `/foo/bar`. + +You may specify an optional `ws_welcome_message`, which is a static payload to be sent to all clients once a websocket connection is first established. + +##### Metadata + +This input adds the following metadata fields to each message: + +``` text +- http_server_user_agent +- http_server_request_path +- http_server_verb +- http_server_remote_ip +- All headers (only first values are taken) +- All query parameters +- All path parameters +- All cookies +``` + +If HTTPS is enabled, the following fields are added as well: +``` text +- http_server_tls_version +- http_server_tls_subject +- http_server_tls_cipher_suite +``` + +{/* TODO: when interpolaion supported +You can access these metadata fields using interpolation functions. */} + +#### Examples + + +##### Path Switching + +This example shows an `http_server` input that captures all requests and processes them by switching on that path: + +```yaml +input: + http_server: + path: / + allowed_verbs: [ GET, POST ] + sync_response: + headers: + Content-Type: application/json + + processors: + - switch: + - check: '@http_server_request_path == "/foo"' + processors: + - mapping: | + root.title = "You Got Fooed!" 
+ root.result = content().string().uppercase() + + - check: '@http_server_request_path == "/bar"' + processors: + - mapping: 'root.title = "Bar Is Slow"' + - sleep: # Simulate a slow endpoint + duration: 1s +``` + +##### Mock OAuth 2.0 Server + +This example shows an `http_server` input that mocks an OAuth 2.0 Client Credentials flow server at the endpoint `/oauth2_test`: + +```yaml +input: + http_server: + path: /oauth2_test + allowed_verbs: [ GET, POST ] + sync_response: + headers: + Content-Type: application/json + + processors: + - log: + message: "Received request" + level: INFO + fields_mapping: | + root = @ + root.body = content().string() + + - mapping: | + root.access_token = "MTQ0NjJkZmQ5OTM2NDE1ZTZjNGZmZjI3" + root.token_type = "Bearer" + root.expires_in = 3600 + + - sync_response: {} + - mapping: 'root = deleted()' +``` + +#### Fields + +##### address + +An alternative address to host from. If left empty the service wide address is used. + + +Type: `string` +Default: `""` + +##### path + +The endpoint path to listen for POST requests. + + +Type: `string` +Default: `"/post"` + +##### ws_path + +The endpoint path to create websocket connections from. + + +Type: `string` +Default: `"/post/ws"` + +##### ws_welcome_message + +An optional message to deliver to fresh websocket connections. + + +Type: `string` +Default: `""` + +##### allowed_verbs + +An array of verbs that are allowed for the `path` endpoint. + + +Type: `array` +Default: `["POST"]` +Requires version 3.33.0 or newer + +##### timeout + +Timeout for requests. If a consumed messages takes longer than this to be delivered the connection is closed, but the message may still be delivered. + + +Type: `string` +Default: `"5s"` + +{/* TODO add rate limit ##### rate_limit + +An optional [rate limit](/docs/components/rate_limits/about) to throttle requests by. */} + + +Type: `string` +Default: `""` + +##### cert_file + +Enable TLS by specifying a certificate and key file. 
Only valid with a custom `address`. + + +Type: `string` +Default: `""` + +##### key_file + +Enable TLS by specifying a certificate and key file. Only valid with a custom `address`. + + +Type: `string` +Default: `""` + +##### cors + +Adds Cross-Origin Resource Sharing headers. Only valid with a custom `address`. + + +Type: `object` +Requires version 3.63.0 or newer + +##### cors.enabled + +Whether to allow CORS requests. + + +Type: `bool` +Default: `false` + +##### cors.allowed_origins + +An explicit list of origins that are allowed for CORS requests. + + +Type: `array` +Default: `[]` + +##### sync_response + +{/* TODO add links to synchronous responses */} +Customize messages returned via synchronous responses. + + +Type: `object` + +##### sync_response.status + +Specify the status code to return with synchronous responses. This is a string value, which allows you to customize it based on resulting payloads and their metadata. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + + +Type: `string` +Default: `"200"` + +```yml +# Examples + +status: ${! json("status") } + +status: ${! meta("status") } +``` + +##### sync_response.headers + +Specify headers to return with synchronous responses. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + + +Type: `object` +Default: `{"Content-Type":"application/octet-stream"}` + +##### sync_response.metadata_headers + +Specify criteria for which metadata values are added to the response as headers. + + +Type: `object` + +##### sync_response.metadata_headers.include_prefixes + +Provide a list of explicit metadata key prefixes to match against. 
+ + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_prefixes: + - foo_ + - bar_ + +include_prefixes: + - kafka_ + +include_prefixes: + - content- +``` + +##### sync_response.metadata_headers.include_patterns + +Provide a list of explicit metadata key regular expression (re2) patterns to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_patterns: + - .* + +include_patterns: + - _timestamp_unix$ +``` + +### Kafka + +Connects to Kafka brokers and consumes one or more topics. + +#### Common + +```yml +# Common config fields, showing default values +input: + label: "" + kafka: + addresses: [] # No default (required) + topics: [] # No default (required) + target_version: 2.1.0 # No default (optional) + consumer_group: "" + checkpoint_limit: 1024 + auto_replay_nacks: true +``` + +#### Advanced + +```yml +# All config fields, showing default values +input: + label: "" + kafka: + addresses: [] # No default (required) + topics: [] # No default (required) + target_version: 2.1.0 # No default (optional) + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + sasl: + mechanism: none + user: "" + password: "" + access_token: "" + token_cache: "" + token_key: "" + consumer_group: "" + client_id: tyk + rack_id: "" + start_from_oldest: true + checkpoint_limit: 1024 + auto_replay_nacks: true + commit_period: 1s + max_processing_period: 100ms + extract_tracing_map: root = @ # No default (optional) + group: + session_timeout: 10s + heartbeat_interval: 3s + rebalance_timeout: 60s + fetch_buffer_cap: 256 + multi_header: false + batching: + count: 0 + byte_size: 0 + period: "" + check: "" + processors: [] # No default (optional) +``` + +Offsets are managed within Kafka under the specified consumer group, and partitions for each topic are automatically balanced across members of the consumer group. 
+ +The Kafka input allows parallel processing of messages from different topic partitions, and messages of the same topic partition are processed with a maximum parallelism determined by the field [checkpoint_limit](#checkpoint_limit). + +In order to enforce ordered processing of partition messages set the [checkpoint_limit](#checkpoint_limit) to `1` and this will force partitions to be processed in lock-step, where a message will only be processed once the prior message is delivered. + +Batching messages before processing can be enabled using the [batching](#batching) field, and this batching is performed per-partition such that messages of a batch will always originate from the same partition. This batching mechanism is capable of creating batches of greater size than the [checkpoint_limit](#checkpoint_limit), in which case the next batch will only be created upon delivery of the current one. + +##### Metadata + +This input adds the following metadata fields to each message: + +``` text +- kafka_key +- kafka_topic +- kafka_partition +- kafka_offset +- kafka_lag +- kafka_timestamp_unix +- kafka_tombstone_message +- All existing message headers (version 0.11+) +``` + +The field `kafka_lag` is the calculated difference between the high water mark offset of the partition at the time of ingestion and the current message offset. + +{/* TODO: when interpolation supported +You can access these metadata fields using function interpolation. */} + +##### Ordering + +By default messages of a topic partition can be processed in parallel, up to a limit determined by the field `checkpoint_limit`. However, if strict ordered processing is required then this value must be set to 1 in order to process shard messages in lock-step. When doing so it is recommended that you perform batching at this component for performance as it will not be possible to batch lock-stepped messages at the output level. 
+ +##### Troubleshooting + +- I'm seeing logs that report `Failed to connect to kafka: kafka: client has run out of available brokers to talk to (Is your cluster reachable?)`, but the brokers are definitely reachable. + +Unfortunately this error message will appear for a wide range of connection problems even when the broker endpoint can be reached. Double check your authentication configuration and also ensure that you have [enabled TLS](#tlsenabled) if applicable. + +#### Fields + +##### addresses + +A list of broker addresses to connect to. If an item of the list contains commas it will be expanded into multiple addresses. + + +Type: `array` + +```yml +# Examples + +addresses: + - localhost:9092 + +addresses: + - localhost:9041,localhost:9042 + +addresses: + - localhost:9041 + - localhost:9042 +``` + +##### topics + +A list of topics to consume from. Multiple comma separated topics can be listed in a single element. Partitions are automatically distributed across consumers of a topic. Alternatively, it's possible to specify explicit partitions to consume from with a colon after the topic name, e.g. `foo:0` would consume the partition 0 of the topic foo. This syntax supports ranges, e.g. `foo:0-10` would consume partitions 0 through to 10 inclusive. + + +Type: `array` +Requires version 3.33.0 or newer + +```yml +# Examples + +topics: + - foo + - bar + +topics: + - foo,bar + +topics: + - foo:0 + - bar:1 + - bar:3 + +topics: + - foo:0,bar:1,bar:3 + +topics: + - foo:0-5 +``` + +##### target_version + +The version of the Kafka protocol to use. This limits the capabilities used by the client and should ideally match the version of your brokers. Defaults to the oldest supported stable version. + + +Type: `string` + +```yml +# Examples + +target_version: 2.1.0 + +target_version: 3.1.0 +``` + +##### tls + +Custom TLS settings can be used to override system defaults. + + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. 
+ + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're seeing the error message `local error: tls: no renegotiation`. + + +Type: `bool` +Default: `false` +Requires version 3.45.0 or newer + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, but not both. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +client_certs: + - cert: foo + key: bar + +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. 
+ + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in PKCS#1 or PKCS#8 format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an attacker recover the plaintext. + + +Type: `string` +Default: `""` + +```yml +# Example + +password: foo +``` + +{/* When Tyk streams with secrets released include this in above example => password: ${KEY_PASSWORD} */} + +##### sasl + +Enables SASL authentication. + + +Type: `object` + +##### sasl.mechanism + +The SASL authentication mechanism, if left empty SASL authentication is not used. + + +Type: `string` +Default: `"none"` + +| Option | Summary | +| :--- | :--- | +| `OAUTHBEARER` | OAuth Bearer based authentication. | +| `PLAIN` | Plain text authentication. NOTE: When using plain text auth it is extremely likely that you'll also need to [enable TLS](#tlsenabled). | +| `SCRAM-SHA-256` | Authentication using the SCRAM-SHA-256 mechanism. | +| `SCRAM-SHA-512` | Authentication using the SCRAM-SHA-512 mechanism. | +| `none` | Default, no SASL authentication. | + + +##### sasl.user + +A PLAIN username. It is recommended that you use environment variables to populate this field. + + +Type: `string` +Default: `""` + +```yml +# Examples + +user: ${USER} +``` + +##### sasl.password + +A PLAIN password. It is recommended that you use environment variables to populate this field. 
+ + +Type: `string` +Default: `""` + +```yml +# Examples + +password: ${PASSWORD} +``` + +##### sasl.access_token + +A static OAUTHBEARER access token + + +Type: `string` +Default: `""` + +{/* TODO add ##### sasl.token_cache + +Instead of using a static `access_token` allows you to query a [`cache`](/docs/components/caches/about) resource to fetch OAUTHBEARER tokens from */} + + +Type: `string` +Default: `""` + +##### sasl.token_key + +Required when using a `token_cache`, the key to query the cache with for tokens. + + +Type: `string` +Default: `""` + +##### consumer_group + +An identifier for the consumer group of the connection. This field can be explicitly made empty in order to disable stored offsets for the consumed topic partitions. + + +Type: `string` +Default: `""` + +##### client_id + +An identifier for the client connection. + + +Type: `string` +Default: `"tyk"` + +##### rack_id + +A rack identifier for this client. + + +Type: `string` +Default: `""` + +##### start_from_oldest + +Determines whether to consume from the oldest available offset, otherwise messages are consumed from the latest offset. The setting is applied when creating a new consumer group or the saved offset no longer exists. + + +Type: `bool` +Default: `true` + +##### checkpoint_limit + +The maximum number of messages of the same topic and partition that can be processed at a given time. Increasing this limit enables parallel processing and batching at the output level to work on individual partitions. Any given offset will not be committed unless all messages under that offset are delivered in order to preserve at least once delivery guarantees. + + +Type: `int` +Default: `1024` +Requires version 3.33.0 or newer + +##### auto_replay_nacks + +Whether messages that are rejected (nacked) at the output level should be automatically replayed indefinitely, eventually resulting in back pressure if the cause of the rejections is persistent. 
If set to `false` these messages will instead be deleted. Disabling auto replays can greatly improve memory efficiency of high throughput streams as the original shape of the data can be discarded immediately upon consumption and mutation. + + +Type: `bool` +Default: `true` + +##### commit_period + +The period of time between each commit of the current partition offsets. Offsets are always committed during shutdown. + + +Type: `string` +Default: `"1s"` + +##### max_processing_period + +A maximum estimate for the time taken to process a message, this is used for tuning consumer group synchronization. + + +Type: `string` +Default: `"100ms"` + +##### extract_tracing_map + +A Bloblang mapping that attempts to extract an object containing tracing propagation information, which will then be used as the root tracing span for the message. The specification of the extracted fields must match the format used by the service wide tracer. + + +Type: `string` +Requires version 3.45.0 or newer + +```yml +# Examples + +extract_tracing_map: root = @ + +extract_tracing_map: root = this.meta.span +``` + +##### group + +Tuning parameters for consumer group synchronization. + + +Type: `object` + +##### group.session_timeout + +A period after which a consumer of the group is kicked after no heartbeats. + + +Type: `string` +Default: `"10s"` + +##### group.heartbeat_interval + +A period in which heartbeats should be sent out. + + +Type: `string` +Default: `"3s"` + +##### group.rebalance_timeout + +A period after which rebalancing is abandoned if unresolved. + + +Type: `string` +Default: `"60s"` + +##### fetch_buffer_cap + +The maximum number of unprocessed messages to fetch at a given time. + + +Type: `int` +Default: `256` + +##### multi_header + +Decode headers into lists to allow handling of multiple values with the same key + + +Type: `bool` +Default: `false` + +##### batching + +Allows you to configure a [batching policy](/api-management/stream-config#batch-policy). 
+ +Type: `object` + +```yml +# Examples + +batching: + byte_size: 5000 + count: 0 + period: 1s + +batching: + count: 10 + period: 1s + +batching: + check: this.contains("END BATCH") + count: 0 + period: 1m +``` + +##### batching.count + +A number of messages at which the batch should be flushed. If `0` disables count based batching. + + +Type: `int` +Default: `0` + +##### batching.byte_size + +An amount of bytes at which the batch should be flushed. If `0` disables size based batching. + + +Type: `int` +Default: `0` + +##### batching.period + +A period in which an incomplete batch should be flushed regardless of its size. + + +Type: `string` +Default: `""` + +```yml +# Examples + +period: 1s + +period: 1m + +period: 500ms +``` + +##### batching.check + +A Bloblang query that should return a boolean value indicating whether a message should end a batch. + +Type: `string` +Default: `""` + +```yml +# Examples + +check: this.type == "end_of_transaction" +``` + +##### batching.processors + +A list of processors to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op. + + +Type: `array` + +```yml +# Examples + +processors: + - archive: + format: concatenate + +processors: + - archive: + format: lines + +processors: + - archive: + format: json_array +``` + +### MQTT +Subscribe to topics on MQTT brokers. 
+ +#### Common +```yml +# Common config fields, showing default values +input: + label: "" + mqtt: + urls: [] # No default (required) + client_id: "" + connect_timeout: 30s + topics: [] # No default (required) + auto_replay_nacks: true +``` + +#### Advanced +```yml +# All config fields, showing default values +input: + label: "" + mqtt: + urls: [] # No default (required) + client_id: "" + dynamic_client_id_suffix: "" # No default (optional) + connect_timeout: 30s + will: + enabled: false + qos: 0 + retained: false + topic: "" + payload: "" + user: "" + password: "" + keepalive: 30 + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + topics: [] # No default (required) + qos: 1 + clean_session: true + auto_replay_nacks: true +``` + +#### Metadata + +This input adds the following metadata fields to each message: + +``` text +- mqtt_duplicate +- mqtt_qos +- mqtt_retained +- mqtt_topic +- mqtt_message_id +``` + +You can access these metadata fields using function interpolation. + +#### Fields + +##### urls + +A list of URLs to connect to. If an item of the list contains commas it will be expanded into multiple URLs. + + +Type: `array` + +```yml +# Examples + +urls: + - tcp://localhost:1883 +``` + +##### client_id + +An identifier for the client connection. + + +Type: `string` +Default: `""` + +##### dynamic_client_id_suffix + +Append a dynamically generated suffix to the specified `client_id` on each run of the pipeline. This can be useful when clustering Streams producers. + + +Type: `string` + +| Option | Summary | +| :--- | :--- | +| `nanoid` | append a nanoid of length 21 characters | + + +##### connect_timeout + +The maximum amount of time to wait in order to establish a connection before the attempt is abandoned. 
+ + +Type: `string` +Default: `"30s"` +Requires version 1.0.0 or newer + +```yml +# Examples + +connect_timeout: 1s + +connect_timeout: 500ms +``` + +##### will + +Set last will message in case of Streams failure + + +Type: `object` + +##### will.enabled + +Whether to enable last will messages. + + +Type: `bool` +Default: `false` + +##### will.qos + +Set QoS for last will message. Valid values are: 0, 1, 2. + + +Type: `int` +Default: `0` + +##### will.retained + +Set retained for last will message. + + +Type: `bool` +Default: `false` + +##### will.topic + +Set topic for last will message. + + +Type: `string` +Default: `""` + +##### will.payload + +Set payload for last will message. + + +Type: `string` +Default: `""` + +##### user + +A username to connect with. + + +Type: `string` +Default: `""` + +##### password + +A password to connect with. + + +Type: `string` +Default: `""` + +##### keepalive + +Max seconds of inactivity before a keepalive message is sent. + + +Type: `int` +Default: `30` + +##### tls + +Custom TLS settings can be used to override system defaults. + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're seeing the error message `local error: tls: no renegotiation`. + + +Type: `bool` +Default: `false` +Requires version 1.0.0 or newer + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas: |- + -----BEGIN CERTIFICATE----- + ... 
+ -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, but not both. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +client_certs: + - cert: foo + key: bar + +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in PKCS#1 or PKCS#8 format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an attacker recover the plaintext. + + +Type: `string` +Default: `""` + +```yml +# Examples + +password: foo + +password: ${KEY_PASSWORD} +``` + +##### topics + +A list of topics to consume from. + + +Type: `array` + +##### qos + +The level of delivery guarantee to enforce. Has options 0, 1, 2. + + +Type: `int` +Default: `1` + +##### clean_session + +Set whether the connection is non-persistent. 
+ + +Type: `bool` +Default: `true` + +##### auto_replay_nacks + +Whether messages that are rejected (nacked) at the output level should be automatically replayed indefinitely, eventually resulting in back pressure if the cause of the rejections is persistent. If set to `false` these messages will instead be deleted. Disabling auto replays can greatly improve memory efficiency of high throughput streams as the original shape of the data can be discarded immediately upon consumption and mutation. + +Type: `bool` +Default: `true` + + +### amqp_0_9 + +Connects to an AMQP (0.9.1) queue. AMQP is a messaging protocol used by various message brokers, including RabbitMQ. + +#### Common + +```yaml +# Common config fields, showing default values +input: + label: "" + amqp_0_9: + urls: [] # No default (required) + queue: "" # No default (required) + consumer_tag: "" + prefetch_count: 10 +``` + +#### Advanced + +```yaml +# All config fields, showing default values +input: + label: "" + amqp_0_9: + urls: [] # No default (required) + queue: "" # No default (required) + queue_declare: + enabled: false + durable: true + auto_delete: false + bindings_declare: [] # No default (optional) + consumer_tag: "" + auto_ack: false + nack_reject_patterns: [] + prefetch_count: 10 + prefetch_size: 0 + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] +``` + +TLS is automatic when connecting to an `amqps` URL, but custom settings can be enabled in the `tls` section. 
+ +#### Metadata + +This input adds the following metadata fields to each message: + +``` +- amqp_content_type +- amqp_content_encoding +- amqp_delivery_mode +- amqp_priority +- amqp_correlation_id +- amqp_reply_to +- amqp_expiration +- amqp_message_id +- amqp_timestamp +- amqp_type +- amqp_user_id +- amqp_app_id +- amqp_consumer_tag +- amqp_delivery_tag +- amqp_redelivered +- amqp_exchange +- amqp_routing_key +``` + +All existing message headers, including nested headers prefixed with the key of their respective parent, can be added. + +#### Fields + +##### urls + +A list of URLs to connect to. The first URL to successfully establish a connection will be used until the connection is closed. +If an item of the list contains commas, it will be expanded into multiple URLs. + +Type: `array` + +```yaml +# Examples +urls: + - amqp://guest:guest@127.0.0.1:5672/ +urls: + - amqp://127.0.0.1:5672/,amqp://127.0.0.2:5672/ +urls: + - amqp://127.0.0.1:5672/ + - amqp://127.0.0.2:5672/ +``` + +##### queue + +An AMQP queue to consume from. + +Type: `string` + +##### queue_declare + +Allows you to passively declare the target queue. If the queue already exists, then the declaration passively verifies that +they match the target fields. + +type: `object` + +##### queue_declare.enabled + +Whether to enable queue declaration. + +Type: `bool` +Default: `false` + +##### queue_declare.durable + +Whether the declared queue is durable. + +Type: `bool` +Default: `true` + +##### queue_declare.auto_delete + +Whether the declared queue will auto-delete. + +Type: `bool` +Default: `false` + +##### bindings_declare + +Allows you to passively declare bindings for the target queue. + +Type: `array` + +```yaml +# Examples +bindings_declare: + - exchange: foo + key: bar +``` + +##### bindings_declare[].exchange + +The exchange of the declared binding. + +Type: `string` +Default: `""` + +##### bindings_declare[].key + +The key of the declared binding. 
+ +Type: `string` +Default: `""` + +##### consumer_tag + +A consumer tag. + +Type: `string` +Default: `""` + +##### auto_ack + +Acknowledge messages automatically as they are consumed rather than waiting for acknowledgments from downstream. +This can improve throughput and prevent the pipeline from blocking but at the cost of eliminating delivery guarantees. + +Type: `bool` +Default: `false` + +##### nack_reject_patterns + +A list of regular expression patterns whereby if a message that has failed to be delivered by Tyk Streams has an error that matches +it will be dropped (or delivered to a dead-letter queue if one exists). By default, failed messages are nacked with requeue enabled. + +Type: `array` +Default: `[]` + +```yaml +# Examples +nack_reject_patterns: + - ^reject me please:.+$ +``` + +##### prefetch_count + +The maximum number of pending messages to have consumed at a time. + +Type: `int` +Default: `10` + +##### prefetch_size + +The maximum number of pending messages measured in bytes to have consumed at a time. + +Type: `int` +Default: `0` + +##### tls + +Custom TLS settings can be used to override system defaults. + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're getting the error message +`local error: tls: no renegotiation.` + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, +to possible intermediate signing certificates, to the host certificate. 
+{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing +a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host +certificate. + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, +but not both. + +Type: `array` +Default: `[]` + +```yaml +# Examples +client_certs: + - cert: foo + key: bar +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: "" + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in *PKCS#1* or *PKCS#8* format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not +supported for the PKCS#8 format. 
Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an +attacker recover the plaintext. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +password: foo +``` + + + +### amqp_1 + +Reads messages from an AMQP (1.0) server. + +#### Common + +```yaml +# Common config fields, showing default values +input: + label: "" + amqp_1: + urls: [] # No default (optional) + source_address: /foo # No default (required) +``` + +#### Advanced + +```yaml +# All config fields, showing default values +input: + label: "" + amqp_1: + urls: [] # No default (optional) + source_address: /foo # No default (required) + azure_renew_lock: false + read_header: false + credit: 64 + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + sasl: + mechanism: none + user: "" + password: "" +``` + +#### Metadata + +This input adds the following metadata fields to each message: + +``` +- amqp_content_type +- amqp_content_encoding +- amqp_creation_time +- All string typed message annotations +``` + +You can access these metadata fields using function interpolation. + +By setting `read_header` to `true`, additional message header properties will be added to each message: + +``` +- amqp_durable +- amqp_priority +- amqp_ttl +- amqp_first_acquirer +- amqp_delivery_count +``` + +#### Performance + +This input benefits from receiving multiple messages in flight in parallel for improved performance. You can tune the max +number of in flight messages with the field `credit`. + +#### Fields + +##### urls + +A list of URLs to connect to. The first URL to successfully establish a connection will be used until the connection is closed. 
+If an item of the list contains commas it will be expanded into multiple URLs. + +Type: `array` + +```yaml +# Examples +urls: + - amqp://guest:guest@127.0.0.1:5672/ +urls: + - amqp://127.0.0.1:5672/,amqp://127.0.0.2:5672/ +urls: + - amqp://127.0.0.1:5672/ + - amqp://127.0.0.2:5672/ +``` + +##### source_address + +The source address to consume from. + +Type: `string` + +```yaml +# Examples +source_address: /foo +source_address: queue:/bar +source_address: topic:/baz +``` + +##### azure_renew_lock + +**Experimental:** Azure service bus specific option to renew lock if processing takes longer than the configured lock time. + +Type: `bool` +Default: `false` + +##### read_header + +Read additional message header fields into `amqp_*` metadata properties. + +Type: `bool` +Default: `false` + +##### credit + +Specifies the maximum number of unacknowledged messages the sender can transmit. Once this limit is reached, no more messages +will arrive until messages are acknowledged and settled. + +Type: `int` +Default: `64` + + +##### tls + +Custom TLS settings can be used to override system defaults. + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're getting the error message +`local error: tls: no renegotiation.` + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, +to possible intermediate signing certificates, to the host certificate. 
+{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing +a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host +certificate. + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, +but not both. + +Type: `array` +Default: `[]` + +```yaml +# Examples +client_certs: + - cert: foo + key: bar +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: "" + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in *PKCS#1* or *PKCS#8* format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not +supported for the PKCS#8 format. 
Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an +attacker recover the plaintext. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +password: foo +``` + +##### sasl + +Enables SASL authentication. + +Type: `object` + +##### sasl.mechanism + +The SASL authentication mechanism to use. + +Type: `string` +Default: `"none"` + +| Option | Summary | +| :----------- | :-------------------------------------- | +| anonymous | Anonymous SASL authentication. | +| none | No SASL based authentication. | +| plain | Plain text SASL authentication. | + + +##### sasl.user + +A SASL plain text username. It is recommended that you use environment variables to populate this field. + +Type: `string` +Default: `""` + +```yaml +# Examples +user: ${USER} +``` + +##### sasl.password + +A SASL plain text password. It is recommended that you use environment variables to populate this field. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + + +```yaml +# Examples +password: ${PASSWORD} +``` + +## Outputs + +### Overview + +An output is a sink where we wish to send our consumed data after applying an optional array of [processors](/api-management/stream-config#overview-3). Only one output is configured at the root of a Tyk Streams config. However, the output can be a [broker](/api-management/stream-config#broker-1) which combines multiple outputs under a chosen brokering pattern. 
+ +An output config section looks like this: + +```yaml +output: + label: my_kafka_output + + kafka: + addresses: [ localhost:9092 ] + topic: "foobar" + + # Optional list of processing steps + processors: + - avro: + operator: from_json +``` + +#### Labels + +Outputs have an optional field `label` that can uniquely identify them in observability data such as logs. + +{/* TODO replace with this paragraph when determine if product supports metrics + +Outputs have an optional field `label` that can uniquely identify them in observability data such as metrics and logs. This can be useful when running configs with multiple outputs, otherwise their metrics labels will be generated based on their composition. For more information check out the [metrics documentation][metrics.about]. */} + +### Broker + +Allows you to route messages to multiple child outputs using a range of brokering [patterns](#patterns). + +#### Common + +```yml +# Common config fields, showing default values +output: + label: "" + broker: + pattern: fan_out + outputs: [] # No default (required) + batching: + count: 0 + byte_size: 0 + period: "" + check: "" +``` + +#### Advanced + +```yml +# All config fields, showing default values +output: + label: "" + broker: + copies: 1 + pattern: fan_out + outputs: [] # No default (required) + batching: + count: 0 + byte_size: 0 + period: "" + check: "" + processors: [] # No default (optional) +``` + +Processors can be listed to apply across individual outputs or all outputs: + +```yaml +output: + broker: + pattern: fan_out + outputs: + - resource: foo + - resource: bar + # Processors only applied to messages sent to bar. + processors: + - resource: bar_processor + + # Processors applied to messages sent to all brokered outputs. + processors: + - resource: general_processor +``` + +#### Fields + +##### copies + +The number of copies of each configured output to spawn. + + +Type: `int` +Default: `1` + +##### pattern + +The brokering pattern to use. 
+ + +Type: `string` +Default: `"fan_out"` +Options: `fan_out`, `fan_out_fail_fast`, `fan_out_sequential`, `fan_out_sequential_fail_fast`, `round_robin`, `greedy`. + +##### outputs + +A list of child outputs to broker. + + +Type: `array` + +##### batching + +Allows you to configure a [batching policy](/api-management/stream-config#batch-policy). + + +Type: `object` + +```yml +# Examples + +batching: + byte_size: 5000 + count: 0 + period: 1s + +batching: + count: 10 + period: 1s + +batching: + check: this.contains("END BATCH") + count: 0 + period: 1m +``` + +##### batching.count + +A number of messages at which the batch should be flushed. If `0` disables count based batching. + + +Type: `int` +Default: `0` + +##### batching.byte_size + +An amount of bytes at which the batch should be flushed. If `0` disables size based batching. + + +Type: `int` +Default: `0` + +##### batching.period + +A period in which an incomplete batch should be flushed regardless of its size. + + +Type: `string` +Default: `""` + +```yml +# Examples + +period: 1s + +period: 1m + +period: 500ms +``` + +##### batching.check + +A Bloblang query that should return a boolean value indicating whether a message should end a batch. + + +Type: `string` +Default: `""` + +```yml +# Examples + +check: this.type == "end_of_transaction" +``` + +##### batching.processors + +A list of processors to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op. 
+ + +Type: `array` + +```yml +# Examples + +processors: + - archive: + format: concatenate + +processors: + - archive: + format: lines + +processors: + - archive: + format: json_array +``` + +#### Patterns + +The broker pattern determines the way in which messages are allocated and can be chosen from the following: + +##### fan_out + +With the fan out pattern all outputs will be sent every message that passes through Tyk Streams in parallel. + +If an output applies back pressure it will block all subsequent messages, and if an output fails to send a message it will be retried continuously until completion or service shut down. This mechanism is in place in order to prevent one bad output from causing a larger retry loop that results in a good output from receiving unbounded message duplicates. + +##### fan_out_fail_fast + +The same as the `fan_out` pattern, except that output failures will not be automatically retried. This pattern should be used with caution as busy retry loops could result in unlimited duplicates being introduced into the non-failure outputs. + +##### fan_out_sequential + +Similar to the fan out pattern except outputs are written to sequentially, meaning an output is only written to once the preceding output has confirmed receipt of the same message. + +If an output applies back pressure it will block all subsequent messages, and if an output fails to send a message it will be retried continuously until completion or service shut down. This mechanism is in place in order to prevent one bad output from causing a larger retry loop that results in a good output from receiving unbounded message duplicates. + +##### fan_out_sequential_fail_fast + +The same as the `fan_out_sequential` pattern, except that output failures will not be automatically retried. This pattern should be used with caution as busy retry loops could result in unlimited duplicates being introduced into the non-failure outputs. 
+ +##### round_robin + +With the round robin pattern each message will be assigned a single output following their order. If an output applies back pressure it will block all subsequent messages. If an output fails to send a message then the message will be re-attempted with the next output, and so on. + +##### greedy + +The greedy pattern results in higher output throughput at the cost of potentially disproportionate message allocations to those outputs. Each message is sent to a single output, which is determined by allowing outputs to claim messages as soon as they are able to process them. This results in certain faster outputs potentially processing more messages at the cost of slower outputs. + + +### HTTP Client + +Sends messages to an HTTP server. + +#### Common + +```yml +# Common config fields, showing default values +output: + label: "" + http_client: + url: "" # No default (required) + verb: POST + headers: {} + timeout: 5s + max_in_flight: 64 + batching: + count: 0 + byte_size: 0 + period: "" + check: "" +``` + +#### Advanced + +```yml +# All config fields, showing default values +output: + label: "" + http_client: + url: "" # No default (required) + verb: POST + headers: {} + metadata: + include_prefixes: [] + include_patterns: [] + dump_request_log_level: "" + oauth: + enabled: false + consumer_key: "" + consumer_secret: "" + access_token: "" + access_token_secret: "" + oauth2: + enabled: false + client_key: "" + client_secret: "" + token_url: "" + scopes: [] + endpoint_params: {} + basic_auth: + enabled: false + username: "" + password: "" + jwt: + enabled: false + private_key_file: "" + signing_method: "" + claims: {} + headers: {} + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + extract_headers: + include_prefixes: [] + include_patterns: [] + timeout: 5s + retry_period: 1s + max_retry_backoff: 300s + retries: 3 + backoff_on: + - 429 + drop_on: [] + successful_on: 
[] + proxy_url: "" # No default (optional) + batch_as_multipart: false + propagate_response: false + max_in_flight: 64 + batching: + count: 0 + byte_size: 0 + period: "" + check: "" + processors: [] # No default (optional) + multipart: [] +``` + +When the number of retries expires the output will reject the message, the behavior after this will depend on the pipeline but usually this simply means the send is attempted again until successful whilst applying back pressure. + +{/* TODO: when interpolation supported +The URL and header values of this type can be dynamically set using function interpolations. */} + +The body of the HTTP request is the raw contents of the message payload. If the message has multiple parts (is a batch) the request will be sent according to [RFC1341](https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html). This behavior can be disabled by setting the field [batch_as_multipart](#batch_as_multipart) to `false`. + +##### Propagating Responses + +It's possible to propagate the response from each HTTP request back to the input source by setting `propagate_response` to `true`. Only inputs that support synchronous responses are able to make use of these propagated responses. + +#### Performance + +This output benefits from sending multiple messages in flight in parallel for improved performance. You can tune the max number of in flight messages (or message batches) with the field `max_in_flight`. + +This output benefits from sending messages as a [batch](/api-management/stream-config#batching-6) for improved performance. Batches can be formed at both the input and output level. + +#### Fields + +##### url + +The URL to connect to. +{/* TODO: when interpolation supported +This field supports interpolation functions. */} + +Type: `string` + +##### verb + +A verb to connect with + + +Type: `string` +Default: `"POST"` + +```yml +# Examples + +verb: POST + +verb: GET + +verb: DELETE +``` + +##### headers + +A map of headers to add to the request. 
+{/* TODO: when interpolation supported +This field supports interpolation functions. */} + + +Type: `object` +Default: `{}` + +```yml +# Examples + +headers: + Content-Type: application/octet-stream + traceparent: ${! tracing_span().traceparent } +``` + +##### metadata + +Specify optional matching rules to determine which metadata keys should be added to the HTTP request as headers. + + +Type: `object` + +##### metadata.include_prefixes + +Provide a list of explicit metadata key prefixes to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_prefixes: + - foo_ + - bar_ + +include_prefixes: + - kafka_ + +include_prefixes: + - content- +``` + +##### metadata.include_patterns + +Provide a list of explicit metadata key regular expression (re2) patterns to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_patterns: + - .* + +include_patterns: + - _timestamp_unix$ +``` + +##### dump_request_log_level + +Optionally set a level at which the request and response payload of each request made will be logged. + + +Type: `string` +Default: `""` +Options: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`, ``. + +##### oauth + +Allows you to specify open authentication via OAuth version 1. + + +Type: `object` + +##### oauth.enabled + +Whether to use OAuth version 1 in requests. + + +Type: `bool` +Default: `false` + +##### oauth.consumer_key + +A value used to identify the client to the service provider. + + +Type: `string` +Default: `""` + +##### oauth.consumer_secret + +A secret used to establish ownership of the consumer key. + + +Type: `string` +Default: `""` + +##### oauth.access_token + +A value used to gain access to the protected resources on behalf of the user. + + +Type: `string` +Default: `""` + +##### oauth.access_token_secret + +A secret provided in order to establish ownership of a given access token. 
+ + +Type: `string` +Default: `""` + +##### oauth2 + +Allows you to specify open authentication via OAuth version 2 using the client credentials token flow. + + +Type: `object` + +##### oauth2.enabled + +Whether to use OAuth version 2 in requests. + + +Type: `bool` +Default: `false` + +##### oauth2.client_key + +A value used to identify the client to the token provider. + + +Type: `string` +Default: `""` + +##### oauth2.client_secret + +A secret used to establish ownership of the client key. + + +Type: `string` +Default: `""` + +##### oauth2.token_url + +The URL of the token provider. + + +Type: `string` +Default: `""` + +##### oauth2.scopes + +A list of optional requested permissions. + + +Type: `array` +Default: `[]` + +##### oauth2.endpoint_params + +A list of optional endpoint parameters, values should be arrays of strings. + + +Type: `object` +Default: `{}` + +```yml +# Examples + +endpoint_params: + bar: + - woof + foo: + - meow + - quack +``` + +##### basic_auth + +Allows you to specify basic authentication. + + +Type: `object` + +##### basic_auth.enabled + +Whether to use basic authentication in requests. + + +Type: `bool` +Default: `false` + +##### basic_auth.username + +A username to authenticate as. + + +Type: `string` +Default: `""` + +##### basic_auth.password + +A password to authenticate with. + + +Type: `string` +Default: `""` + +##### jwt + +Allows you to specify JWT authentication. + + +Type: `object` + +##### jwt.enabled + +Whether to use JWT authentication in requests. + + +Type: `bool` +Default: `false` + +##### jwt.private_key_file + +A file with the PEM encoded via PKCS1 or PKCS8 as private key. + + +Type: `string` +Default: `""` + +##### jwt.signing_method + +A method used to sign the token such as RS256, RS384, RS512 or EdDSA. + + +Type: `string` +Default: `""` + +##### jwt.claims + +A value used to identify the claims that issued the JWT. + + +Type: `object` +Default: `{}` + +##### jwt.headers + +Add optional key/value headers to the JWT. 
+ + +Type: `object` +Default: `{}` + +##### tls + +Custom TLS settings can be used to override system defaults. + + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're seeing the error message `local error: tls: no renegotiation`. + + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, but not both. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +client_certs: + - cert: foo + key: bar + +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. 
+ + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in PKCS#1 or PKCS#8 format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an attacker recover the plaintext. + + +Type: `string` +Default: `""` + +```yml +# Examples + +password: foo +``` + +##### extract_headers + +Specify which response headers should be added to resulting synchronous response messages as metadata. Header keys are lowercased before matching, so ensure that your patterns target lowercased versions of the header keys that you expect. This field is not applicable unless `propagate_response` is set to `true`. + + +Type: `object` + +##### extract_headers.include_prefixes + +Provide a list of explicit metadata key prefixes to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_prefixes: + - foo_ + - bar_ + +include_prefixes: + - kafka_ + +include_prefixes: + - content- +``` + +##### extract_headers.include_patterns + +Provide a list of explicit metadata key regular expression (re2) patterns to match against. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +include_patterns: + - .* + +include_patterns: + - _timestamp_unix$ +``` + +##### timeout + +A static timeout to apply to requests. + + +Type: `string` +Default: `"5s"` + +##### retry_period + +The base period to wait between failed requests. + + +Type: `string` +Default: `"1s"` + +##### max_retry_backoff + +The maximum period to wait between failed requests. + + +Type: `string` +Default: `"300s"` + +##### retries + +The maximum number of retry attempts to make. 
+ + +Type: `int` +Default: `3` + +##### backoff_on + +A list of status codes whereby the request should be considered to have failed and retries should be attempted, but the period between them should be increased gradually. + + +Type: `array` +Default: `[429]` + +##### drop_on + +A list of status codes whereby the request should be considered to have failed but retries should not be attempted. This is useful for preventing wasted retries for requests that will never succeed. Note that with these status codes the _request_ is dropped, but _message_ that caused the request will not be dropped. + + +Type: `array` +Default: `[]` + +##### successful_on + +A list of status codes whereby the attempt should be considered successful, this is useful for dropping requests that return non-2XX codes indicating that the message has been dealt with, such as a 303 See Other or a 409 Conflict. All 2XX codes are considered successful unless they are present within `backoff_on` or `drop_on`, regardless of this field. + + +Type: `array` +Default: `[]` + +##### proxy_url + +An optional HTTP proxy URL. + + +Type: `string` + +##### batch_as_multipart + +Send message batches as a single request using [RFC1341](https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html). If disabled messages in batches will be sent as individual requests. + + +Type: `bool` +Default: `false` + +##### propagate_response + +Whether responses from the server should be propagated back to the input. + + +Type: `bool` +Default: `false` + +##### max_in_flight + +The maximum number of parallel message batches to have in flight at any given time. + + +Type: `int` +Default: `64` + +##### batching + +Allows you to configure a [batching policy](/api-management/stream-config#batching-6). 
+ + +Type: `object` + +```yml +# Examples + +batching: + byte_size: 5000 + count: 0 + period: 1s + +batching: + count: 10 + period: 1s + +batching: + check: this.contains("END BATCH") + count: 0 + period: 1m +``` + +##### batching.count + +A number of messages at which the batch should be flushed. If `0` disables count based batching. + + +Type: `int` +Default: `0` + +##### batching.byte_size + +An amount of bytes at which the batch should be flushed. If `0` disables size based batching. + + +Type: `int` +Default: `0` + +##### batching.period + +A period in which an incomplete batch should be flushed regardless of its size. + + +Type: `string` +Default: `""` + +```yml +# Examples + +period: 1s + +period: 1m + +period: 500ms +``` + +{/* TODO: when bloblang supported +##### batching.check + +A Bloblang query that should return a boolean value indicating whether a message should end a batch. + + +Type: `string` +Default: `""` + +```yml +# Examples + +check: this.type == "end_of_transaction" +``` */} + +##### batching.processors + +A list of processors to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op. + + +Type: `array` + +```yml +# Examples + +processors: + - archive: + format: concatenate + +processors: + - archive: + format: lines + +processors: + - archive: + format: json_array +``` + +##### multipart + +Create explicit multipart HTTP requests by specifying an array of parts to add to the request, each part specified consists of content headers and a data field that can be populated dynamically. If this field is populated it will override the default request creation behavior. + + +Type: `array` +Default: `[]` + +##### multipart[].content_type + +The content type of the individual message part. 
+{/* TODO: when interpolation supported +This field supports interpolation functions. */} + + +Type: `string` +Default: `""` + +```yml +# Examples + +content_type: application/bin +``` + +##### multipart[].content_disposition + +The content disposition of the individual message part. +{/* TODO: when interpolation supported +This field supports interpolation functions. */} + + +Type: `string` +Default: `""` + +```yml +# Examples + +content_disposition: form-data; name="bin"; filename='${! @AttachmentName } +``` + +##### multipart[].body + +The body of the individual message part. +{/* TODO: when interpolation supported +This field supports interpolation functions. */} + + +Type: `string` +Default: `""` + +```yml +# Examples + +body: ${! this.data.part1 } +``` + +### HTTP Server + +Sets up an HTTP server that will send messages over HTTP(S) GET requests. HTTP 2.0 is supported when using TLS, which is enabled when key and cert files are specified. + +#### Common + +```yml +# Common config fields, showing default values +output: + label: "" + http_server: + address: "" + path: /get + stream_path: /get/stream + ws_path: /get/ws + allowed_verbs: + - GET +``` + +#### Advanced + +```yml +# All config fields, showing default values +output: + label: "" + http_server: + address: "" + path: /get + stream_path: /get/stream + ws_path: /get/ws + allowed_verbs: + - GET + timeout: 5s + cert_file: "" + key_file: "" + cors: + enabled: false + allowed_origins: [] +``` + +Sets up an HTTP server that will send messages over HTTP(S) GET requests. + +{/* TODO add link here If the `address` config field is left blank the [service-wide HTTP server](/docs/components/http/about) will be used. */} + +Three endpoints will be registered at the paths specified by the fields `path`, `stream_path` and `ws_path`. Which allow you to consume a single message batch, a continuous stream of line delimited messages, or a websocket of messages for each request respectively. 
+ +When messages are batched the `path` endpoint encodes the batch according to [RFC1341](https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html). + +{/* TODO add link here - This behavior can be overridden by [archiving your batches](/docs/configuration/batching#post-batch-processing). */} + +Please note, messages are considered delivered as soon as the data is written to the client. There is no concept of at least once delivery on this output. + +Please note that components within a Tyk config will register their respective endpoints in a non-deterministic order. This means that establishing precedence of endpoints that are registered via multiple `http_server` inputs or outputs (either within brokers or from cohabiting streams) is not possible in a predictable way. + +This ambiguity makes it difficult to ensure that paths which are both a subset of a path registered by a separate component, and end in a slash (`/`) and will therefore match against all extensions of that path, do not prevent the more specific path from matching against requests. + +It is therefore recommended that you ensure paths of separate components do not collide unless they are explicitly non-competing. + +For example, if you were to deploy two separate `http_server` inputs, one with a path `/foo/` and the other with a path `/foo/bar`, it would not be possible to ensure that the path `/foo/` does not swallow requests made to `/foo/bar`. + + +#### Fields + +##### address + +An alternative address to host from. If left empty the service wide address is used. + + +Type: `string` +Default: `""` + +##### path + +The path from which discrete messages can be consumed. + + +Type: `string` +Default: `"/get"` + +##### stream_path + +The path from which a continuous stream of messages can be consumed. + + +Type: `string` +Default: `"/get/stream"` + +##### ws_path + +The path from which websocket connections can be established. 
+ + +Type: `string` +Default: `"/get/ws"` + +##### allowed_verbs + +An array of verbs that are allowed for the `path` and `stream_path` HTTP endpoint. + + +Type: `array` +Default: `["GET"]` + +##### timeout + +The maximum time to wait before a blocking, inactive connection is dropped (only applies to the `path` endpoint). + + +Type: `string` +Default: `"5s"` + +##### cert_file + +Enable TLS by specifying a certificate and key file. Only valid with a custom `address`. + + +Type: `string` +Default: `""` + +##### key_file + +Enable TLS by specifying a certificate and key file. Only valid with a custom `address`. + + +Type: `string` +Default: `""` + +##### cors + +Adds Cross-Origin Resource Sharing headers. Only valid with a custom `address`. + + +Type: `object` + +##### cors.enabled + +Whether to allow CORS requests. + + +Type: `bool` +Default: `false` + +##### cors.allowed_origins + +An explicit list of origins that are allowed for CORS requests. + + +Type: `array` +Default: `[]` + + + +### Kafka + +The kafka output type writes a batch of messages to Kafka brokers and waits for acknowledgment before propagating it back to the input. 
+ +#### Common + +```yml +# Common config fields, showing default values +output: + label: "" + kafka: + addresses: [] # No default (required) + topic: "" # No default (required) + target_version: 2.1.0 # No default (optional) + key: "" + partitioner: fnv1a_hash + compression: none + static_headers: {} # No default (optional) + metadata: + exclude_prefixes: [] + max_in_flight: 64 + batching: + count: 0 + byte_size: 0 + period: "" + check: "" +``` + +#### Advanced + +```yml +# All config fields, showing default values +output: + label: "" + kafka: + addresses: [] # No default (required) + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + sasl: + mechanism: none + user: "" + password: "" + access_token: "" + token_cache: "" + token_key: "" + topic: "" # No default (required) + client_id: tyk + target_version: 2.1.0 # No default (optional) + rack_id: "" + key: "" + partitioner: fnv1a_hash + partition: "" + custom_topic_creation: + enabled: false + partitions: -1 + replication_factor: -1 + compression: none + static_headers: {} # No default (optional) + metadata: + exclude_prefixes: [] + inject_tracing_map: meta = @.merge(this) # No default (optional) + max_in_flight: 64 + idempotent_write: false + ack_replicas: false + max_msg_bytes: 1000000 + timeout: 5s + retry_as_batch: false + batching: + count: 0 + byte_size: 0 + period: "" + check: "" + processors: [] # No default (optional) + max_retries: 0 + backoff: + initial_interval: 3s + max_interval: 10s + max_elapsed_time: 30s +``` + +The config field `ack_replicas` determines whether we wait for acknowledgment from all replicas or just a single broker. + +{/* Add links to bloblang queries : Both the `key` and `topic` fields can be dynamically set using function interpolations. */} + +Metadata will be added to each message sent as headers (version 0.11+), but can be restricted using the field [metadata](#metadata). 
+ +##### Strict Ordering and Retries + +When strict ordering is required for messages written to topic partitions it is important to ensure that both the field `max_in_flight` is set to `1` and that the field `retry_as_batch` is set to `true`. + +You must also ensure that failed batches are never rerouted back to the same output. This can be done by setting the field `max_retries` to `0` and `backoff.max_elapsed_time` to empty, which will apply back pressure indefinitely until the batch is sent successfully. + +{/* TODO: Add link to fallback broker */} +However, this also means that manual intervention will eventually be required in cases where the batch cannot be sent due to configuration problems such as an incorrect `max_msg_bytes` estimate. A less strict but automated alternative would be to route failed batches to a dead letter queue using a `fallback` broker, but this would allow subsequent batches to be delivered in the meantime whilst those failed batches are dealt with. + +##### Troubleshooting + +- I'm seeing logs that report `Failed to connect to kafka: kafka: client has run out of available brokers to talk to (Is your cluster reachable?)`, but the brokers are definitely reachable. + +Unfortunately this error message will appear for a wide range of connection problems even when the broker endpoint can be reached. Double check your authentication configuration and also ensure that you have [enabled TLS](#tlsenabled) if applicable. + +#### Performance + +This output benefits from sending multiple messages in flight in parallel for improved performance. You can tune the max number of in flight messages (or message batches) with the field `max_in_flight`. + +This output benefits from sending messages as a [batch](/api-management/stream-config#batching-6) for improved performance. Batches can be formed at both the input and output level. + +#### Fields + +##### addresses + +A list of broker addresses to connect to. 
If an item of the list contains commas it will be expanded into multiple addresses. + + +Type: `array` + +```yml +# Examples + +addresses: + - localhost:9092 + +addresses: + - localhost:9041,localhost:9042 + +addresses: + - localhost:9041 + - localhost:9042 +``` + +##### tls + +Custom TLS settings can be used to override system defaults. + + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're seeing the error message `local error: tls: no renegotiation`. + + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, but not both. 
+ + +Type: `array` +Default: `[]` + +```yml +# Examples + +client_certs: + - cert: foo + key: bar + +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in PKCS#1 or PKCS#8 format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an attacker recover the plaintext. + + +Type: `string` +Default: `""` + +```yml +# Example + +password: foo +``` + +{/* When Tyk streams with secrets released include this in above example => password: ${KEY_PASSWORD} */} + +##### sasl + +Enables SASL authentication. + + +Type: `object` + +##### sasl.mechanism + +The SASL authentication mechanism, if left empty SASL authentication is not used. + + +Type: `string` +Default: `"none"` + +| Option | Summary | +| :--- | :--- | +| `OAUTHBEARER` | OAuth Bearer based authentication. | +| `PLAIN` | Plain text authentication. NOTE: When using plain text auth it is extremely likely that you'll also need to [enable TLS](#tlsenabled). | +| `SCRAM-SHA-256` | Authentication using the SCRAM-SHA-256 mechanism. | +| `SCRAM-SHA-512` | Authentication using the SCRAM-SHA-512 mechanism. | +| `none` | Default, no SASL authentication. | + + +##### sasl.user + +A PLAIN username. It is recommended that you use environment variables to populate this field. 
+ + +Type: `string` +Default: `""` + +```yml +# Examples + +user: ${USER} +``` + +##### sasl.password + +A PLAIN password. It is recommended that you use environment variables to populate this field. + + +Type: `string` +Default: `""` + +```yml +# Examples + +password: ${PASSWORD} +``` + +##### sasl.access_token + +A static OAUTHBEARER access token + + +Type: `string` +Default: `""` + +##### sasl.token_cache + +Instead of using a static `access_token` allows you to query a `cache` resource to fetch OAUTHBEARER tokens from +{/* TODO: add cache resource link */} + +Type: `string` +Default: `""` + +##### sasl.token_key + +Required when using a `token_cache`, the key to query the cache with for tokens. + + +Type: `string` +Default: `""` + +##### topic + +The topic to publish messages to. +{/* TODO: when interpolation supported +This field supports interpolation functions. */} + + +Type: `string` + +##### client_id + +An identifier for the client connection. + + +Type: `string` +Default: `"tyk"` + +##### target_version + +The version of the Kafka protocol to use. This limits the capabilities used by the client and should ideally match the version of your brokers. Defaults to the oldest supported stable version. + + +Type: `string` + +```yml +# Examples + +target_version: 2.1.0 + +target_version: 3.1.0 +``` + +##### rack_id + +A rack identifier for this client. + + +Type: `string` +Default: `""` + +##### key + +The key to publish messages with. +{/* TODO: when interpolation supported +This field supports interpolation functions. */} + + +Type: `string` +Default: `""` + +##### partitioner + +The partitioning algorithm to use. + + +Type: `string` +Default: `"fnv1a_hash"` +Options: `fnv1a_hash`, `murmur2_hash`, `random`, `round_robin`, `manual`. + +##### partition + +The manually-specified partition to publish messages to, relevant only when the field `partitioner` is set to `manual`. Must be able to parse as a 32-bit integer. 
+{/* TODO: when interpolation supported
+This field supports interpolation functions. */}
+
+
+Type: `string`
+Default: `""`
+
+##### custom_topic_creation
+
+If enabled, topics will be created with the specified number of partitions and replication factor if they do not already exist.
+
+
+Type: `object`
+
+##### custom_topic_creation.enabled
+
+Whether to enable custom topic creation.
+
+
+Type: `bool`
+Default: `false`
+
+##### custom_topic_creation.partitions
+
+The number of partitions to create for new topics. Leave at -1 to use the broker configured default. Must be `>= 1`.
+
+
+Type: `int`
+Default: `-1`
+
+##### custom_topic_creation.replication_factor
+
+The replication factor to use for new topics. Leave at -1 to use the broker configured default. Must be an odd number, and less than or equal to the number of brokers.
+
+
+Type: `int`
+Default: `-1`
+
+##### compression
+
+The compression algorithm to use.
+
+
+Type: `string`
+Default: `"none"`
+Options: `none`, `snappy`, `lz4`, `gzip`, `zstd`.
+
+##### static_headers
+
+An optional map of static headers that should be added to messages in addition to metadata.
+
+
+Type: `object`
+
+```yml
+# Examples
+
+static_headers:
+  first-static-header: value-1
+  second-static-header: value-2
+```
+
+##### metadata
+
+Specify criteria for which metadata values are sent with messages as headers.
+
+
+Type: `object`
+
+##### metadata.exclude_prefixes
+
+Provide a list of explicit metadata key prefixes to be excluded when adding metadata to sent messages.
+
+
+Type: `array`
+Default: `[]`
+
+##### inject_tracing_map
+
+A Bloblang mapping used to inject an object containing tracing propagation information into outbound messages. The specification of the injected fields will match the format used by the service wide tracer. 
+ + +Type: `string` +Requires version 3.45.0 or newer + +```yml +# Examples + +inject_tracing_map: meta = @.merge(this) + +inject_tracing_map: root.meta.span = this +``` + +##### max_in_flight + +The maximum number of messages to have in flight at a given time. Increase this to improve throughput. + + +Type: `int` +Default: `64` + +##### idempotent_write + +Enable the idempotent write producer option. This requires the `IDEMPOTENT_WRITE` permission on `CLUSTER` and can be disabled if this permission is not available. + + +Type: `bool` +Default: `false` + +##### ack_replicas + +Ensure that messages have been copied across all replicas before acknowledging receipt. + + +Type: `bool` +Default: `false` + +##### max_msg_bytes + +The maximum size in bytes of messages sent to the target topic. + + +Type: `int` +Default: `1000000` + +##### timeout + +The maximum period of time to wait for message sends before abandoning the request and retrying. + + +Type: `string` +Default: `"5s"` + +##### retry_as_batch + +When enabled forces an entire batch of messages to be retried if any individual message fails on a send, otherwise only the individual messages that failed are retried. Disabling this helps to reduce message duplicates during intermittent errors, but also makes it impossible to guarantee strict ordering of messages. + + +Type: `bool` +Default: `false` + +##### batching + +Allows you to configure a [batching policy](/api-management/stream-config#batch-policy). + + +Type: `object` + +```yml +# Examples + +batching: + byte_size: 5000 + count: 0 + period: 1s + +batching: + count: 10 + period: 1s + +batching: + check: this.contains("END BATCH") + count: 0 + period: 1m +``` + +##### batching.count + +A number of messages at which the batch should be flushed. If `0` disables count based batching. + + +Type: `int` +Default: `0` + +##### batching.byte_size + +An amount of bytes at which the batch should be flushed. If `0` disables size based batching. 
+ + +Type: `int` +Default: `0` + +##### batching.period + +A period in which an incomplete batch should be flushed regardless of its size. + + +Type: `string` +Default: `""` + +```yml +# Examples + +period: 1s + +period: 1m + +period: 500ms +``` + +##### batching.check + +A Bloblang query that should return a boolean value indicating whether a message should end a batch. + + +Type: `string` +Default: `""` + +```yml +# Examples + +check: this.type == "end_of_transaction" +``` + +##### batching.processors + +{/* TODO: add list of processors link */} + +A list of processors to apply to a batch as it is flushed. This allows you to aggregate and archive the batch however you see fit. Please note that all resulting messages are flushed as a single batch, therefore splitting the batch into smaller batches using these processors is a no-op. + + +Type: `array` + +```yml +# Examples + +processors: + - archive: + format: concatenate + +processors: + - archive: + format: lines + +processors: + - archive: + format: json_array +``` + +##### max_retries + +The maximum number of retries before giving up on the request. If set to zero there is no discrete limit. + + +Type: `int` +Default: `0` + +##### backoff + +Control time intervals between retry attempts. + + +Type: `object` + +##### backoff.initial_interval + +The initial period to wait between retry attempts. + + +Type: `string` +Default: `"3s"` + +```yml +# Examples + +initial_interval: 50ms + +initial_interval: 1s +``` + +##### backoff.max_interval + +The maximum period to wait between retry attempts + + +Type: `string` +Default: `"10s"` + +```yml +# Examples + +max_interval: 5s + +max_interval: 1m +``` + +##### backoff.max_elapsed_time + +The maximum overall period of time to spend on retry attempts before the request is aborted. Setting this value to a zeroed duration (such as `0s`) will result in unbounded retries. 
+ + +Type: `string` +Default: `"30s"` + +```yml +# Examples + +max_elapsed_time: 1m + +max_elapsed_time: 1h +``` + +### MQTT +Pushes messages to an MQTT broker. + +The topic field can be dynamically set using function interpolations described here. When sending batched messages these interpolations are performed per message part. + +#### Common +```yml +# Common config fields, showing default values +output: + label: "" + mqtt: + urls: [] # No default (required) + client_id: "" + connect_timeout: 30s + topic: "" # No default (required) + qos: 1 + write_timeout: 3s + retained: false + max_in_flight: 64 +``` + +#### Advanced +```yml +# All config fields, showing default values +output: + label: "" + mqtt: + urls: [] # No default (required) + client_id: "" + dynamic_client_id_suffix: "" # No default (optional) + connect_timeout: 30s + will: + enabled: false + qos: 0 + retained: false + topic: "" + payload: "" + user: "" + password: "" + keepalive: 30 + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + topic: "" # No default (required) + qos: 1 + write_timeout: 3s + retained: false + retained_interpolated: "" # No default (optional) + max_in_flight: 64 +``` + +#### Performance + +This output benefits from sending multiple messages in flight in parallel for improved performance. You can tune the max number of in flight messages (or message batches) with the field `max_in_flight`. + +#### Fields + +##### urls + +A list of URLs to connect to. If an item of the list contains commas it will be expanded into multiple URLs. + + +Type: `array` + +```yml +# Examples + +urls: + - tcp://localhost:1883 +``` + +##### client_id + +An identifier for the client connection. + + +Type: `string` +Default: `""` + +##### dynamic_client_id_suffix + +Append a dynamically generated suffix to the specified `client_id` on each run of the pipeline. This can be useful when clustering Streams producers. 
+ + +Type: `string` + +| Option | Summary | +| :--- | :--- | +| `nanoid` | append a nanoid of length 21 characters | + + +##### connect_timeout + +The maximum amount of time to wait in order to establish a connection before the attempt is abandoned. + + +Type: `string` +Default: `"30s"` +Requires version 1.0.0 or newer + +```yml +# Examples + +connect_timeout: 1s + +connect_timeout: 500ms +``` + +##### will + +Set last will message in case of Streams failure + + +Type: `object` + +##### will.enabled + +Whether to enable last will messages. + + +Type: `bool` +Default: `false` + +##### will.qos + +Set QoS for last will message. Valid values are: 0, 1, 2. + + +Type: `int` +Default: `0` + +##### will.retained + +Set retained for last will message. + + +Type: `bool` +Default: `false` + +##### will.topic + +Set topic for last will message. + + +Type: `string` +Default: `""` + +##### will.payload + +Set payload for last will message. + + +Type: `string` +Default: `""` + +##### user + +A username to connect with. + + +Type: `string` +Default: `""` + +##### password + +A password to connect with. + + +Type: `string` +Default: `""` + +##### keepalive + +Max seconds of inactivity before a keepalive message is sent. + + +Type: `int` +Default: `30` + +##### tls + +Custom TLS settings can be used to override system defaults. + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're seeing the error message `local error: tls: no renegotiation`. + + +Type: `bool` +Default: `false` +Requires version 1.0.0 or newer + +##### tls.root_cas + +An optional root certificate authority to use. 
This is a string, representing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host certificate. + + +Type: `string` +Default: `""` + +```yml +# Examples + +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, but not both. + + +Type: `array` +Default: `[]` + +```yml +# Examples + +client_certs: + - cert: foo + key: bar + +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in PKCS#1 or PKCS#8 format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an attacker recover the plaintext. 
+ + +Type: `string` +Default: `""` + +```yml +# Examples + +password: foo + +password: ${KEY_PASSWORD} +``` + +##### topic + +The topic to publish messages to. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + + +Type: `string` + +##### qos + +The QoS value to set for each message. Has options 0, 1, 2. + + +Type: `int` +Default: `1` + +##### write_timeout + +The maximum amount of time to wait to write data before the attempt is abandoned. + + +Type: `string` +Default: `"3s"` +Requires version 1.0.0 or newer + +```yml +# Examples + +write_timeout: 1s + +write_timeout: 500ms +``` + +##### retained + +Set message as retained on the topic. + + +Type: `bool` +Default: `false` + +##### retained_interpolated + +Override the value of `retained` with an interpolable value, this allows it to be dynamically set based on message contents. The value must resolve to either `true` or `false`. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + + +Type: `string` +Requires version 1.0.0 or newer + +##### max_in_flight + +The maximum number of messages to have in flight at a given time. Increase this to improve throughput. + + +Type: `int` +Default: `64` + + +### amqp_0_9 + +Sends messages to an AMQP (0.91) exchange. AMQP is a messaging protocol used by various message brokers, +including RabbitMQ. 
+ +#### Common + +```yaml +# Common config fields, showing default values +output: + label: "" + amqp_0_9: + urls: [] # No default (required) + exchange: "" # No default (required) + key: "" + type: "" + metadata: + exclude_prefixes: [] + max_in_flight: 64 +``` + +#### Advanced + +```yaml +# All config fields, showing default values +output: + label: "" + amqp_0_9: + urls: [] # No default (required) + exchange: "" # No default (required) + exchange_declare: + enabled: false + type: direct + durable: true + key: "" + type: "" + content_type: application/octet-stream + content_encoding: "" + correlation_id: "" + reply_to: "" + expiration: "" + message_id: "" + user_id: "" + app_id: "" + metadata: + exclude_prefixes: [] + priority: "" + max_in_flight: 64 + persistent: false + mandatory: false + immediate: false + timeout: "" + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] +``` + +#### Metadata + +The metadata from each message is delivered as headers. + +It's possible for this output type to create the target exchange by setting `exchange_declare.enabled` to `true`, if the exchange +already exists then the declaration passively verifies that the settings match. + +TLS is automatic when connecting to an `amqps` URL, but custom settings can be enabled in the `tls` section. + +#### Fields + +##### urls + +A list of URLs to connect to. The first URL to successfully establish a connection will be used until the connection is closed. +If an item of the list contains commas, it will be expanded into multiple URLs. + +Type: `array` + +```yaml +# Examples +urls: + - amqp://guest:guest@127.0.0.1:5672/ +urls: + - amqp://127.0.0.1:5672/,amqp://127.0.0.2:5672/ +urls: + - amqp://127.0.0.1:5672/ + - amqp://127.0.0.2:5672/ +``` + +##### exchange + +An AMQP exchange to publish to. +{/* TODO: when interpolation supported: +This field supports interpolation functions. 
*/} + +Type: `string` + +##### exchange_declare + +Optionally declare the target exchange (passive). + +Type: `object` + +##### exchange_declare.enabled + +Whether to declare the exchange. + +Type: `bool` +Default: `false` + +##### exchange_declare.type + +The type of the exchange. + +Type: `string` +Default: `"direct"` +Options: `direct`, `fanout`, `topic`, `x-custom` + +##### exchange_declare.durable + +Whether the exchange should be durable. + +Type: `bool` +Default: `true` + + +##### key + +The binding key to set for each message. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### type + +The type property to set for each message. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### content_type + +The content type attribute to set for each message. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `application/octet-stream` + +##### content_encoding + +The content encoding attribute to set for each message. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### correlation_id + +Set the correlation ID of each message with a dynamic interpolated expression. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### reply_to + +Carries response queue name - set with a dynamic interpolated expression. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### expiration + +Set the per-message TTL. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### message_id + +Set the message ID of each message with a dynamic interpolated expression. 
+{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### user_id + +Set the user ID to the name of the publisher. If this property is set by a publisher, its value must be equal to the name +of the user used to open the connection. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### app_id + +Set the application ID of each message with a dynamic interpolated expression. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +##### metadata + +Specify criteria for which metadata values are attached to messages as headers. + +Type: `object` + +##### metadata.exclude_prefixes + +Provide a list of explicit metadata key prefixes to be excluded when adding metadata to sent messages. + +Type: `array` +Default: `[]` + +##### priority + +Set the priority of each message with a dynamic interpolated expression. +{/* TODO: when interpolation supported: +This field supports interpolation functions. */} + +Type: `string` +Default: `""` + +```yaml +# Examples +priority: "0" +priority: ${! metadata("amqp_priority") } +priority: ${! json("doc.priority") } +``` + +##### max_in_flight + +The maximum number of messages to have in flight at a given time. Increase this to improve throughput. + +Type: `int` +Default: `64` + +##### persistent + +Whether message delivery should be persistent (transient by default). + +Type: `bool` +Default: `false` + +##### mandatory + +Whether to set the mandatory flag on published messages. When set if a published message is routed to zero queues, it is returned. + +Type: `bool` +Default: `false` + +##### immediate + +Whether to set the immediate flag on published messages. When set if there are no ready consumers of a queue, then the message is dropped instead of waiting. 
+ +Type: `bool` +Default: `false` + +##### timeout + +The maximum period to wait before abandoning it and reattempting. If not set, wait indefinitely. + +Type: `string` +Default: `""` + +##### tls + +Custom TLS settings can be used to override system defaults. + +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're getting the error message +`local error: tls: no renegotiation.` + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, +to possible intermediate signing certificates, to the host certificate. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing +a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host +certificate. + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, +but not both. 
+ +Type: `array` +Default: `[]` + +```yaml +# Examples +client_certs: + - cert: foo + key: bar +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in *PKCS#1* or *PKCS#8* format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not +supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an +attacker recover the plaintext. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +password: foo +``` + +### amqp_1 + +Sends messages to an AMQP (1.0) server. 
+ +#### Common + +```yaml +# Common config fields, showing default values +output: + label: "" + amqp_1: + urls: [] # No default (optional) + target_address: /foo # No default (required) + max_in_flight: 64 + metadata: + exclude_prefixes: [] +``` + +#### Advanced + +```yaml +# All config fields, showing default values +output: + label: "" + amqp_1: + urls: [] # No default (optional) + target_address: /foo # No default (required) + max_in_flight: 64 + tls: + enabled: false + skip_cert_verify: false + enable_renegotiation: false + root_cas: "" + root_cas_file: "" + client_certs: [] + application_properties_map: "" # No default (optional) + sasl: + mechanism: none + user: "" + password: "" + metadata: + exclude_prefixes: [] +``` + +#### Metadata + +Message metadata is added to each AMQP message as string annotations. To control which metadata keys are added, use the `metadata` config field. + +#### Performance + +This output benefits from sending multiple messages in flight in parallel for improved performance. You can tune the max number of in flight +messages (or message batches) with the field `max_in_flight`. + + +#### Fields + +##### urls + +A list of URLs to connect to. The first URL to successfully establish a connection will be used until the connection is closed. +If an item of the list contains commas it will be expanded into multiple URLs. + +Type: `array` + +```yaml +# Examples +urls: + - amqp://guest:guest@127.0.0.1:5672/ +urls: + - amqp://127.0.0.1:5672/,amqp://127.0.0.2:5672/ +urls: + - amqp://127.0.0.1:5672/ + - amqp://127.0.0.2:5672/ +``` + +##### target_address + +The target address to write to. + +Type: `string` + +```yaml +# Examples +target_address: /foo +target_address: queue:/bar +target_address: topic:/baz +``` + +##### max_in_flight + +The maximum number of messages to have in flight at a given time. Increase this to improve throughput. + +Type: `int` +Default: `64` + +##### tls + +Custom TLS settings can be used to override system defaults. 
+ +Type: `object` + +##### tls.enabled + +Whether custom TLS settings are enabled. + +Type: `bool` +Default: `false` + +##### tls.skip_cert_verify + +Whether to skip server side certificate verification. + +Type: `bool` +Default: `false` + +##### tls.enable_renegotiation + +Whether to allow the remote server to repeatedly request renegotiation. Enable this option if you're getting the error message +`local error: tls: no renegotiation.` + +Type: `bool` +Default: `false` + +##### tls.root_cas + +An optional root certificate authority to use. This is a string, representing a certificate chain from the parent trusted root certificate, +to possible intermediate signing certificates, to the host certificate. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +##### tls.root_cas_file + +An optional path of a root certificate authority file to use. This is a file, often with a .pem extension, containing +a certificate chain from the parent trusted root certificate, to possible intermediate signing certificates, to the host +certificate. + +Type: `string` +Default: `""` + +```yaml +# Examples +root_cas_file: ./root_cas.pem +``` + +##### tls.client_certs + +A list of client certificates to use. For each certificate either the fields `cert` and `key`, or `cert_file` and `key_file` should be specified, +but not both. + +Type: `array` +Default: `[]` + +```yaml +# Examples +client_certs: + - cert: foo + key: bar +client_certs: + - cert_file: ./example.pem + key_file: ./example.key +``` + +##### tls.client_certs[].cert + +A plain text certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key + +A plain text certificate key to use. 
+{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +##### tls.client_certs[].cert_file + +The path of a certificate to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].key_file + +The path of a certificate key to use. + +Type: `string` +Default: `""` + +##### tls.client_certs[].password + +A plain text password for when the private key is password encrypted in *PKCS#1* or *PKCS#8* format. The obsolete `pbeWithMD5AndDES-CBC` algorithm is not +supported for the PKCS#8 format. Warning: Since it does not authenticate the ciphertext, it is vulnerable to padding oracle attacks that can let an +attacker recover the plaintext. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + +```yaml +# Examples +password: foo +``` + +##### application_properties_map + +An optional Bloblang mapping that can be defined to set the `application-properties` on output messages. + +Type: `string` + +##### sasl + +Enables SASL authentication. + +Type: `object` + +##### sasl.mechanism + +The SASL authentication mechanism to use. + +Type: `string` +Default: `"none"` + +| Option | Summary | +| :----------- | :-------------------------------------- | +| anonymous | Anonymous SASL authentication. | +| none | No SASL based authentication. | +| plain | Plain text SASL authentication. | + + +##### sasl.user + +A SASL plain text username. It is recommended that you use environment variables to populate this field. + +Type: `string` +Default: `""` + +```yaml +# Examples +user: ${USER} +``` + +##### sasl.password + +A SASL plain text password. 
It is recommended that you use environment variables to populate this field. +{/* TODO add secrets link :::warning Secret +This field contains sensitive information that usually shouldn't be added to a config directly, read our [secrets page for more info](/docs/configuration/secrets). +::: */} + +Type: `string` +Default: `""` + + +```yaml +# Examples +password: ${PASSWORD} +``` + +##### metadata + +Specify criteria for which metadata values are attached to messages as headers. + +Type: `object` + +##### metadata.exclude_prefixes + +Provide a list of explicit metadata key prefixes to be excluded when adding metadata to sent messages. + +Type: `array` +Default: `[]` + +## Processors + +### Overview + +Tyk Streams processors are functions applied to messages passing through a pipeline. + +Processors are set via config, and depending on where in the config they are placed they will be run either immediately after a specific input (set in the input section), on all messages (set in the pipeline section) or before a specific output (set in the output section). Most processors apply to all messages and can be placed in the pipeline section: + +```yaml +pipeline: + threads: 1 + processors: + - label: my_avro + avro: + operator: "to_json" + encoding: textual +``` + +The `threads` field in the pipeline section determines how many parallel processing threads are created. You can read more about parallel processing in the [pipeline guide](/api-management/stream-config#processing-pipelines). + +#### Labels + +{/* TODO: Replace paragraph below in subsequent iteration when know if metrics supported from product + +Processors have an optional field `label` that can uniquely identify them in observability data such as metrics and logs. This can be useful when running configs with multiple nested processors, otherwise their metrics labels will be generated based on their composition. For more information check out the [metrics documentation]. 
*/} + +Processors have an optional field `label` that can uniquely identify them in observability data such as logs. + +### Avro + +```yml +# Config fields, with default values +label: "" +avro: + operator: "" # No default (required) + encoding: textual + schema: "" + schema_path: "" +``` + + + +**Note** + +If you are consuming or generating messages using a schema registry service then it is likely this processor will fail as those services require messages to be prefixed with the identifier of the schema version being used. + + + +#### Operators + +##### to_json + +Converts Avro documents into a JSON structure. This makes it easier to +manipulate the contents of the document within Tyk Streams. The encoding field +specifies how the source documents are encoded. + + +##### from_json + +Attempts to convert JSON documents into Avro documents according to the +specified encoding. + +#### Fields + +##### operator + +The [operator](#operators) to execute + + +Type: `string` +Options: `to_json`, `from_json`. + +##### encoding + +An Avro encoding format to use for conversions to and from a schema. + + +Type: `string` +Default: `"textual"` +Options: `textual`, `binary`, `single`. + +##### schema + +A full Avro schema to use. + + +Type: `string` +Default: `""` + +##### schema_path + +The path of a schema document to apply. Use either this or the `schema` field. + + +Type: `string` +Default: `""` + +```yml +# Examples + +schema_path: file://path/to/spec.avsc + +schema_path: http://localhost:8081/path/to/spec/versions/1 +``` + +### Mapping + +Executes a Bloblang mapping on messages, creating a new document that replaces (or filters) the original message. + +Bloblang is a powerful language that enables various mapping, transformation, and filtering tasks. For more information, check out the [Bloblang docs](https://warpstreamlabs.github.io/bento/docs/guides/bloblang/about/). 
+ +```yml +label: "" +mapping: "" # No default (required) +``` + +#### Example + +Given a JSON document with US location names and the states they are located in: +```json +{ + "locations": [ + {"name": "Seattle", "state": "WA"}, + {"name": "New York", "state": "NY"}, + {"name": "Bellevue", "state": "WA"}, + {"name": "Olympia", "state": "WA"} + ] +} +``` + +If we want to collapse the location names from the state of Washington into a field `Cities`: + +```json +{"Cities": "Bellevue, Olympia, Seattle"} +``` + +We could use the following bloblang mapping: + +```yml +pipeline: + processors: + - mapping: | + root.Cities = this.locations. + filter(loc -> loc.state == "WA"). + map_each(loc -> loc.name). + sort().join(", ") +``` + +#### Considerations + + - If a mapping fails, the message remains unchanged. However, Bloblang provides powerful ways to ensure your mappings do not fail by specifying desired fallback behaviour. See [this section of the Bloblang docs](https://warpstreamlabs.github.io/bento/docs/configuration/error_handling/). + - Mapping operates by creating an entirely new object during assignments. This has the advantage of treating the original referenced document as immutable and, therefore, queryable at any stage of your mapping. As a result, the `Cities` JSON document in the above example is a new, separate copy of the original document, which remains unchanged. + +## Tracers + +### Overview + +A tracer type represents a destination for Tyk Streams to send tracing events to such as [Jaeger](https://www.jaegertracing.io/). + +When a tracer is configured all messages will be allocated a root span during ingestion that represents their journey through a Streams pipeline. Many Streams processors create spans, and so tracing is a great way to analyse the pathways of individual messages as they progress through a Streams instance. 
+ +Some inputs, such as `http_server` and `http_client`, are capable of extracting a root span from the source of the message (HTTP headers). This is +a work in progress and should eventually expand so that all inputs have a way of doing so. + +Other inputs, such as `kafka` can be configured to extract a root span by using the `extract_tracing_map` field. + +A tracer config section looks like this: + +```yaml +tracer: + jaeger: + agent_address: localhost:6831 + sampler_type: const + sampler_param: 1 +``` + + + +**Note** + +Although the configuration spec of this component is stable the format of spans, tags and logs created by Streams is subject to change as it is tuned for improvement. + + + +### Jaeger + +```yml +# Common config fields, showing default values +tracer: + jaeger: + agent_address: "" + collector_url: "" + sampler_type: const + flush_interval: "" # No default (optional) +``` + +#### Advanced + +```yml +# All config fields, showing default values +tracer: + jaeger: + agent_address: "" + collector_url: "" + sampler_type: const + sampler_param: 1 + tags: {} + flush_interval: "" # No default (optional) +``` + +Send tracing events to a [Jaeger](https://www.jaegertracing.io/) agent or collector. + +#### Fields + +##### agent_address + +The address of a Jaeger agent to send tracing events to. + +Type: `string` +Default: `""` + +```yml +# Examples + +agent_address: jaeger-agent:6831 +``` + +##### collector_url + +The URL of a Jaeger collector to send tracing events to. If set, this will override `agent_address`. + +Type: `string` +Default: `""` + +```yml +# Examples + +collector_url: https://jaeger-collector:14268/api/traces +``` + +##### sampler_type + +The sampler type to use. + +Type: `string` +Default: `"const"` + +| Option | Summary | +| :--- | :--- | +| `const` | Sample a percentage of traces. 1 or more means all traces are sampled, 0 means no traces are sampled and anything in between means a percentage of traces are sampled. 
Tuning the sampling rate is recommended for high-volume production workloads. | + +##### sampler_param + +A parameter to use for sampling. This field is unused for some sampling types. + +Type: `float` +Default: `1` + +##### tags + +A map of tags to add to tracing spans. + +Type: `object` +Default: `{}` + +##### flush_interval + +The period of time between each flush of tracing spans. + +Type: `string` + +### OpenTelemetry Collector + +```yml +# Common config fields, showing default values +tracer: + open_telemetry_collector: + http: [] # No default (required) + grpc: [] # No default (required) + sampling: + enabled: false + ratio: 0.85 # No default (optional) +``` + +#### Advanced + +```yml +# All config fields, showing default values +tracer: + open_telemetry_collector: + http: [] # No default (required) + grpc: [] # No default (required) + tags: {} + sampling: + enabled: false + ratio: 0.85 # No default (optional) +``` + + + +**Note** + +This component is experimental and therefore subject to change or removal outside of major version releases. + + + +Send tracing events to an [Open Telemetry collector](https://opentelemetry.io/docs/collector/). + +#### Fields + +##### http + +A list of http collectors. + +Type: `array` + +##### http[].address + +The endpoint of a collector to send tracing events to. + +Type: `string` + +```yml +# Examples + +address: localhost:4318 +``` + +##### http[].secure + +Connect to the collector over HTTPS + +Type: `bool` +Default: `false` + +##### grpc + +A list of grpc collectors. + +Type: `array` + +##### grpc[].address + +The endpoint of a collector to send tracing events to. + +Type: `string` + +```yml +# Examples + +address: localhost:4317 +``` + +##### grpc[].secure + +Connect to the collector with client transport security + +Type: `bool` +Default: `false` + +##### tags + +A map of tags to add to all tracing spans. + +Type: `object` +Default: `{}` + +##### sampling + +Settings for trace sampling. 
Sampling is recommended for high-volume production workloads. + +Type: `object` + +##### sampling.enabled + +Whether to enable sampling. + +Type: `bool` +Default: `false` + +##### sampling.ratio + +Sets the ratio of traces to sample. + +Type: `float` + +```yml +# Examples + +ratio: 0.85 + +ratio: 0.5 +``` + +## Metrics + +### Overview + +Streams emits lots of metrics in order to expose how components configured within your pipeline are behaving. You can configure exactly where these metrics end up with the config field `metrics`, which describes a metrics format and destination. For example, if you wished to push them via the Prometheus protocol you could use this configuration: + +```yaml +metrics: + prometheus: + push_interval: 1s + push_job_name: in + push_url: http://localhost:9091 +``` + +### Metric Names + +Metrics are emitted with a prefix that can be configured with the field `prefix`. The default prefix is `bento`. The following metrics are emitted with the respective types: + +#### Gauges + +- `{prefix}_input_count` Number of inputs currently active. +- `{prefix}_output_count` Number of outputs currently active. +- `{prefix}_processor_count` Number of processors currently active. +- `{prefix}_cache_count` Number of caches currently active. +- `{prefix}_condition_count` Number of conditions currently active. +- `{prefix}_input_connection_up` 1 if a particular input is connected, 0 if it is not. +- `{prefix}_output_connection_up` 1 if a particular output is connected, 0 if it is not. +- `{prefix}_input_running` 1 if a particular input is running, 0 if it is not. +- `{prefix}_output_running` 1 if a particular output is running, 0 if it is not. +- `{prefix}_processor_running` 1 if a particular processor is running, 0 if it is not. +- `{prefix}_cache_running` 1 if a particular cache is running, 0 if it is not. +- `{prefix}_condition_running` 1 if a particular condition is running, 0 if it is not. 
+- `{prefix}_buffer_running` 1 if a particular buffer is running, 0 if it is not. +- `{prefix}_buffer_available` The number of messages that can be read from a buffer. +- `{prefix}_input_retry` The number of active retry attempts for a particular input. +- `{prefix}_output_retry` The number of active retry attempts for a particular output. +- `{prefix}_processor_retry` The number of active retry attempts for a particular processor. +- `{prefix}_cache_retry` The number of active retry attempts for a particular cache. +- `{prefix}_condition_retry` The number of active retry attempts for a particular condition. +- `{prefix}_buffer_retry` The number of active retry attempts for a particular buffer. +- `{prefix}_threads_active` The number of processing threads currently active. + +#### Counters + +- `{prefix}_input_received` Count of messages received by a particular input. +- `{prefix}_input_batch_received` Count of batches received by a particular input. +- `{prefix}_output_sent` Count of messages sent by a particular output. +- `{prefix}_output_batch_sent` Count of batches sent by a particular output. +- `{prefix}_processor_processed` Count of messages processed by a particular processor. +- `{prefix}_processor_batch_processed` Count of batches processed by a particular processor. +- `{prefix}_processor_dropped` Count of messages dropped by a particular processor. +- `{prefix}_processor_batch_dropped` Count of batches dropped by a particular processor. +- `{prefix}_processor_error` Count of errors returned by a particular processor. +- `{prefix}_processor_batch_error` Count of batch errors returned by a particular processor. +- `{prefix}_cache_hit` Count of cache key lookups that found a value. +- `{prefix}_cache_miss` Count of cache key lookups that did not find a value. +- `{prefix}_cache_added` Count of new cache entries. +- `{prefix}_cache_err` Count of errors that occurred during a cache operation. 
+- `{prefix}_condition_hit` Count of condition checks that passed. +- `{prefix}_condition_miss` Count of condition checks that failed. +- `{prefix}_condition_error` Count of errors that occurred during a condition check. +- `{prefix}_buffer_added` Count of messages added to a particular buffer. +- `{prefix}_buffer_batch_added` Count of batches added to a particular buffer. +- `{prefix}_buffer_read` Count of messages read from a particular buffer. +- `{prefix}_buffer_batch_read` Count of batches read from a particular buffer. +- `{prefix}_buffer_ack` Count of messages removed from a particular buffer. +- `{prefix}_buffer_batch_ack` Count of batches removed from a particular buffer. +- `{prefix}_buffer_nack` Count of messages that failed to be removed from a particular buffer. +- `{prefix}_buffer_batch_nack` Count of batches that failed to be removed from a particular buffer. +- `{prefix}_buffer_err` Count of errors that occurred during a buffer operation. +- `{prefix}_buffer_batch_err` Count of batch errors that occurred during a buffer operation. +- `{prefix}_input_error` Count of errors that occurred during an input operation. +- `{prefix}_input_batch_error` Count of batch errors that occurred during an input operation. +- `{prefix}_output_error` Count of errors that occurred during an output operation. +- `{prefix}_output_batch_error` Count of batch errors that occurred during an output operation. +- `{prefix}_resource_cache_error` Count of errors that occurred during a resource cache operation. +- `{prefix}_resource_condition_error` Count of errors that occurred during a resource condition operation. +- `{prefix}_resource_input_error` Count of errors that occurred during a resource input operation. +- `{prefix}_resource_processor_error` Count of errors that occurred during a resource processor operation. +- `{prefix}_resource_output_error` Count of errors that occurred during a resource output operation. 
+- `{prefix}_resource_rate_limit_error` Count of errors that occurred during a resource rate limit operation. + +#### Timers + +- `{prefix}_input_latency` Latency of a particular input. +- `{prefix}_input_batch_latency` Latency of a particular input at the batch level. +- `{prefix}_output_latency` Latency of a particular output. +- `{prefix}_output_batch_latency` Latency of a particular output at the batch level. +- `{prefix}_processor_latency` Latency of a particular processor. +- `{prefix}_processor_batch_latency` Latency of a particular processor at the batch level. +- `{prefix}_condition_latency` Latency of a particular condition. +- `{prefix}_condition_batch_latency` Latency of a particular condition at the batch level. +- `{prefix}_cache_latency` Latency of a particular cache. +- `{prefix}_buffer_latency` Latency of a particular buffer. +- `{prefix}_buffer_batch_latency` Latency of a particular buffer at the batch level. + +### Metric Labels + +All metrics are emitted with the following labels: + +- `path` The path of the component within the config. +- `label` A custom label for the component, which is optional and falls back to the component type. + +### Prometheus + +```yml +# Common config fields, showing default values +metrics: + prometheus: + prefix: tyk + push_interval: "" + push_job_name: kafka_out + push_url: "" +``` + +#### Advanced + +```yml +# All config fields, showing default values +metrics: + prometheus: + prefix: tyk + push_interval: "" + push_job_name: my_stream + push_url: "" + push_basic_auth: + enabled: false + username: "" + password: "" + file_path: "" + use_histogram_timing: false + histogram_buckets: [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0] +``` + +Send metrics to a Prometheus push gateway, or expose them via HTTP endpoints. + +#### Fields + +##### prefix + +A string prefix for all metrics. + +Type: `string` +Default: `"bento"` + +##### push_interval + +The interval between pushing metrics to the push gateway. 
+ +Type: `string` +Default: `""` + +```yml +# Examples + +push_interval: 1s + +push_interval: 1m +``` + +##### push_job_name + +A job name to attach to metrics pushed to the push gateway. + +Type: `string` +Default: `"bento_push"` + +##### push_url + +The URL to push metrics to. + +Type: `string` +Default: `""` + +```yml +# Examples + +push_url: http://localhost:9091 +``` + +##### push_basic_auth + +Basic authentication configuration for the push gateway. + +Type: `object` + +##### push_basic_auth.enabled + +Whether to use basic authentication when pushing metrics. + +Type: `bool` +Default: `false` + +##### push_basic_auth.username + +The username to authenticate with. + +Type: `string` +Default: `""` + +##### push_basic_auth.password + +The password to authenticate with. + +Type: `string` +Default: `""` + +##### file_path + +The file path to write metrics to. + +Type: `string` +Default: `""` + +```yml +# Examples + +file_path: /tmp/metrics.txt +``` + +##### use_histogram_timing + +Whether to use histogram metrics for timing values. When set to false, summary metrics are used instead. + +Type: `bool` +Default: `false` + +##### histogram_buckets + +A list of duration buckets to track when use_histogram_timing is set to true. + +Type: `array` +Default: `[0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0]` + +## Common Configuration + +### Batching + +Tyk Streams is able to join sources and sinks with sometimes conflicting batching behaviours without sacrificing its strong delivery guarantees. Therefore, batching within Tyk Streams is a mechanism that serves multiple purposes: + +1. [Performance (throughput)](#performance) +2. [Compatibility (mixing multi and single part message protocols)](#compatibility) + +#### Performance + +For most users the only benefit of batching messages is improving throughput over your output protocol. For some protocols this can happen in the background and requires no configuration from you. 
However, if an output has a `batching` configuration block this means it benefits from batching and requires you to specify how you'd like your batches to be formed by configuring a [batching policy](#batch-policy): + +```yaml +output: + kafka: + addresses: [ todo:9092 ] + topic: tyk_stream + + # Either send batches when they reach 10 messages or when 100ms has passed + # since the last batch. + batching: + count: 10 + period: 100ms +``` + +However, a small number of inputs such as [kafka](/api-management/stream-config#kafka) must be consumed sequentially (in this case by partition) and therefore benefit from specifying your batch policy at the input level instead: + +```yaml +input: + kafka: + addresses: [ todo:9092 ] + topics: [ tyk_input_stream ] + batching: + count: 10 + period: 100ms + +output: + kafka: + addresses: [ todo:9092 ] + topic: tyk_stream +``` + +Inputs that behave this way are documented as such and have a `batching` configuration block. + +Sometimes you may prefer to create your batches before processing, in which case if your input doesn't already support [a batch policy](#batch-policy) you can instead use a [broker](/api-management/stream-config#broker), which also allows you to combine inputs with a single batch policy: + +```yaml +input: + broker: + inputs: + - resource: foo + - resource: bar + batching: + count: 50 + period: 500ms +``` + +This also works the same with [output brokers](/api-management/stream-config#broker-1). + +#### Compatibility + +Tyk Streams is able to read and write over protocols that support multiple part messages, and all payloads travelling through Tyk Streams are represented as a multiple part message. Therefore, all components within Tyk Streams are able to work with multiple parts in a message as standard. + +When messages reach an output that *doesn't* support multiple parts the message is broken down into an individual message per part, and then one of two behaviours happen depending on the output. 
If the output supports batch sending messages then the collection of messages are sent as a single batch. Otherwise, Tyk Streams falls back to sending the messages sequentially in multiple, individual requests. + +This behaviour means that not only can multiple part message protocols be easily matched with single part protocols, but also the concept of multiple part messages and message batches are interchangeable within Tyk Streams. + +#### Batch Policy + +When an input or output component has a config field `batching` that means it supports a batch policy. This is a mechanism that allows you to configure exactly how your batching should work on messages before they are routed to the input or output it's associated with. Batches are considered complete and will be flushed downstream when either of the following conditions are met: + + +- The `byte_size` field is non-zero and the total size of the batch in bytes matches or exceeds it (disregarding metadata.) +- The `count` field is non-zero and the total number of messages in the batch matches or exceeds it. +- The `period` field is non-empty and the time since the last batch exceeds its value. + +This allows you to combine conditions: + +```yaml +output: + kafka: + addresses: [ todo:9092 ] + topic: tyk_stream + + # Either send batches when they reach 10 messages or when 100ms has passed + # since the last batch. + batching: + count: 10 + period: 100ms +``` + + + +A batch policy has the capability to *create* batches, but not to break them down. + + + +If your configured pipeline is processing messages that are batched *before* they reach the batch policy then they may circumvent the conditions you've specified here, resulting in sizes you aren't expecting. + +### Field Paths + +Many components within Tyk Streams allow you to target certain fields using a JSON dot path. 
The syntax of a path within Tyk Streams is similar to [JSON Pointers](https://tools.ietf.org/html/rfc6901), except with dot separators instead of slashes (and no leading dot.) When a path is used to set a value any path segment that does not yet exist in the structure is created as an object. + +For example, if we had the following JSON structure: + +```json +{ + "foo": { + "bar": 21 + } +} +``` + +The query path `foo.bar` would return `21`. + +The characters `~` (%x7E) and `.` (%x2E) have special meaning in Tyk Streams paths. Therefore `~` needs to be encoded as `~0` and `.` needs to be encoded as `~1` when these characters appear within a key. + +For example, if we had the following JSON structure: + +```json +{ + "foo.foo": { + "bar~bo": { + "": { + "baz": 22 + } + } + } +} +``` + +The query path `foo~1foo.bar~0bo..baz` would return `22`. + +#### Arrays + +When Tyk Streams encounters an array whilst traversing a JSON structure it requires the next path segment to be either an integer of an existing index, or, depending on whether the path is used to query or set the target value, the character `*` or `-` respectively. + +For example, if we had the following JSON structure: + +```json +{ + "foo": [ + 0, 1, { "bar": 23 } + ] +} +``` + +The query path `foo.2.bar` would return `23`. + +##### Querying + +When a query reaches an array the character `*` indicates that the query should return the value of the remaining path from each element of the array (within an array.) + +##### Setting + +When an array is reached the character `-` indicates that a new element should be appended to the end of the existing elements, if this character is not the final segment of the path then an object is created. + +### Processing Pipelines + +Within a Tyk Streams configuration, in between `input` and `output`, is a `pipeline` section. This section describes an array of processors that are to be applied to *all* messages, and are not bound to any particular input or output. 
+ +If you have processors that are heavy on CPU and aren't specific to a certain input or output they are best suited for the pipeline section. It is advantageous to use the pipeline section as it allows you to set an explicit number of parallel threads of execution: + +```yaml +input: + resource: foo + +pipeline: + threads: 4 + processors: + - avro: + operator: "to_json" + +output: + resource: bar +``` + +If the field `threads` is set to `-1` (the default) it will automatically match the number of logical CPUs available. By default almost all Tyk Streams sources will utilize as many processing threads as have been configured, which makes horizontal scaling easy. diff --git a/api-management/streams-end-to-end-example.mdx b/api-management/streams-end-to-end-example.mdx new file mode 100644 index 000000000..e3dddfcfb --- /dev/null +++ b/api-management/streams-end-to-end-example.mdx @@ -0,0 +1,230 @@ +--- +title: "Tyk Streams End-to-End Example" +description: "A comprehensive end-to-end example of Tyk Streams implementation" +keywords: "Tyk Streams, Event-Driven APIs, Kafka, WebSockets, SSE, Correlation IDs" +sidebarTitle: "Tyk Streams End-to-End Example" +--- + +
+ +## Why Tyk Streams? + +Tyk Streams adds a **declarative event layer** on top of the Tyk Gateway, letting you expose or consume broker topics (Kafka, NATS, RabbitMQ…) through normal HTTP channelsβ€”REST, WebSocket, Server-Sent Eventsβ€”without glue code. + +You can manage stream definitions in three interchangeable ways: + +| Method | When to use | +| :-------- | :------------- | +| **Tyk Dashboard UI** | Rapid prototyping and PoCs | +| **OpenAPI + `x-tyk-streaming`** | β€œEverything-as-code”, safe for Git | +| **Tyk Operator (Kubernetes CRD)** | GitOps & CI/CD pipelines | + +--- + +## Requirements + +* **Tyk Gateway β‰₯ 5.8** with **Streams** feature enabled +* **Apache Kafka** reachable on `localhost:9093` +* *(Optional)* **Prometheus** and **Jaeger** if you enable the commented observability blocks + +--- + +## Architecture + +The demo shows a classic pattern: a user request becomes an event on a bus, a worker processes it asynchronously, and the result is delivered back to the same userβ€”without leaking data across tenants. + +```mermaid +sequenceDiagram + autonumber + participant Client + participant GatewayIn as Gateway
<br/>in stream + participant KafkaJobs as Kafka<br/>topic **jobs** + participant Worker as Worker<br/>Worker stream + participant KafkaCompleted as Kafka<br/>topic **completed** + participant GatewayOut as Gateway<br/>
out stream + + %% synchronous request from client + Client ->> GatewayIn: POST /push-event **(sync)** + GatewayIn -->> Client: 200 OK + echo **(sync)** + + %% asynchronous event flow + GatewayIn -->> KafkaJobs: publish event **(async)** + KafkaJobs -->> Worker: consume job **(async)** + Worker -->> KafkaCompleted: publish result **(async)** + KafkaCompleted -->> GatewayOut: consume result **(async)** + + %% synchronous delivery back to the same user + GatewayOut ->> Client: GET /get-event / WS / SSE **(sync)** +``` + +### Stream-per-responsibility pattern + +| Stream | Role | Input | Output | +| :-------- | :------ | :------- | :-------- | +| **`in`** | Edge entrypoint: accepts HTTP, enriches payload (`user_id`, `job_id`), publishes to **`jobs`** and echoes to caller | HTTP | Kafka + sync response | +| **`Worker`** | Background micro-service: listens to **`jobs`**, attaches `result: "bar"`, publishes to **`completed`** | Kafka | Kafka | +| **`out`** | Edge exit point: listens to **`completed`**, drops messages not owned by caller, delivers via REST/WS/SSE | Kafka | HTTP | + +--- + +## Processor mapping (built-in scripting) + +Streams pipelines include **processors**. The *mapping* processor embeds [Bloblang](https://www.benthos.dev/docs/guides/bloblang/about/) so you can transform or filter messages inline: + +* **Enrich** – `in` adds `user_id` & `job_id` +* **Augment** – `Worker` adds a static field `{ "result": "bar" }` +* **Filter** – `out` calls `deleted()` for non-matching users + +Dynamic placeholders (`$tyk_context.…`) can reference query params, headers, JWT claims, or any other context variableβ€”usable anywhere in the Streams config. + +--- + +## Observability (optional) + +Uncomment the `metrics:` and `tracer:` blocks to push per-stream Prometheus metrics and Jaeger traces. Tags like `stream: Worker` make end-to-end tracing trivial. 
+ +--- + +## Full OpenAPI definition + +Copy/paste into `streams-demo.yaml`, import via Dashboard UI, or apply with Tyk Operator: + +```yaml +info: + title: streams-demo + version: 1.0.0 +openapi: 3.0.3 +servers: + - url: http://tyk-gateway:8282/stream-demo/ +x-tyk-streaming: + streams: + Worker: + input: + kafka: + addresses: + - localhost:9093 + consumer_group: worker + topics: + - jobs + output: + kafka: + addresses: + - localhost:9093 + topic: completed + pipeline: + processors: + - mapping: | + root = this.merge({ "result": "bar" }) +# metrics: +# prometheus: +# push_interval: 1s +# push_job_name: Worker +# push_url: http://localhost:9091 +# tracer: +# jaeger: +# collector_url: http://localhost:14268/api/traces +# tags: +# stream: Worker + + in: + input: + http_server: + path: /push-event + ws_path: /ws-out + output: + broker: + outputs: + - kafka: + addresses: + - localhost:9093 + topic: jobs + - sync_response: {} + pipeline: + processors: + - mapping: | + root = this + root.user_id = "$tyk_context.request_data_user" # or $tyk_context.jwt.claims.sub + root.job_id = uuid_v4() +# tracer: +# jaeger: +# collector_url: http://localhost:14268/api/traces +# tags: +# stream: in +# metrics: +# prometheus: +# push_interval: 1s +# push_job_name: in +# push_url: http://localhost:9091 + + out: + input: + kafka: + addresses: + - localhost:9093 + consumer_group: $tyk_context.request_data_user + topics: + - completed + output: + http_server: + path: /get-event + ws_path: /ws-in + pipeline: + processors: + - mapping: | + root = if this.user_id != "$tyk_context.request_data_user" { + deleted() + } +# tracer: +# jaeger: +# collector_url: http://localhost:14268/api/traces +# tags: +# stream: out +# metrics: +# prometheus: +# push_interval: 1s +# push_job_name: out +# push_url: http://localhost:9091 +security: [] +paths: {} +components: + securitySchemes: {} +x-tyk-api-gateway: + info: + name: stream-demo + state: + active: true + internal: false + middleware: + global: + 
contextVariables: + enabled: true + trafficLogs: + enabled: true + server: + listenPath: + strip: true + value: /stream-demo/ + upstream: + proxy: + enabled: false + url: "" +``` + +--- + +## Running the demo + +1. **Start Kafka** (e.g. docker-compose). +2. **Launch Tyk Gateway 5.8+** with the YAML above. +3. **Send an event** + ```bash + curl -X POST "http://localhost:8282/stream-demo/push-event?user=alice" \ + -H "Content-Type: application/json" \ + -d '{"message":"hello world"}' + ``` +4. **Receive the result** (only *alice*’s jobs) + ```bash + curl "http://localhost:8282/stream-demo/get-event?user=alice" + ``` +5. **Switch transport** – connect via websocket `wscat -c http://127.0.0.1:8282/stream-demo/ws-in\?user\=alice` +6. *(Optional)* **Enable metrics & tracing** – uncomment blocks, restart Gateway, explore in Grafana & Jaeger. diff --git a/api-management/sync/quick-start.mdx b/api-management/sync/quick-start.mdx new file mode 100644 index 000000000..51ed0b0d5 --- /dev/null +++ b/api-management/sync/quick-start.mdx @@ -0,0 +1,107 @@ +--- +title: "Tyk Sync Quick Start Guide" +description: "Quick start guide for Tyk Sync to synchronize API configurations with Tyk Dashboard" +keywords: "Quick Start, Tyk Sync, API Management, Automations" +sidebarTitle: "Quick Start" +--- + +**Tyk Sync** is a command line tool and library to manage and synchronise a Tyk installation with your version control system (VCS). This guide will help you get started with Tyk Sync to manage your API configurations. + +## What We'll Cover in This Guide + +1. Set up Tyk Demo (gateway with prebuilt APIs) +2. Install Tyk Sync using Docker +3. Use Tyk Sync to dump API configurations from the Tyk Demo +4. Observe the dumped configurations +5. Make changes and sync back to Tyk Demo +6. Verify changes in Tyk Demo + +## Instructions + +### 1. 
Set Up Tyk Demo + +First, let's set up a Tyk Demo environment with some prebuilt APIs: + +```bash +# Pull and run Tyk Demo using Docker Compose +git clone https://github.com/TykTechnologies/tyk-pro-docker-demo +cd tyk-pro-docker-demo +./up.sh +``` + +This will start a Tyk Gateway and Dashboard with some sample APIs already configured. The Dashboard will be available at `http://localhost:3000`. + +### 2. Install Tyk Sync + +Follow this [guide](/product-stack/tyk-sync/installing-tyk-sync) to install Tyk Sync. You can either use the Docker image or download the binary directly. We will be using the Docker image for this quick start. + +### 3. Dump API Configurations + +Now, let's dump the API configurations from the Tyk Dashboard: + +```bash +# Create a directory to store your API configurations +mkdir -p tyk-sync-data + +# Get your Dashboard API key from the Dashboard UI (User menu > Profile) +# Replace YOUR_DASHBOARD_API_KEY with your actual key + +# Using Docker +docker run --rm -v $(pwd)/tyk-sync-data:/opt/tyk-sync/data --network=host tykio/tyk-sync:v2.1 dump -d="http://localhost:3000" -s="YOUR_DASHBOARD_API_KEY" -t="/opt/tyk-sync/data" + +# Or using the binary directly +tyk-sync dump -d="http://localhost:3000" -s="YOUR_DASHBOARD_API_KEY" -t="./tyk-sync-data" +``` + +This command will: +- Connect to your Tyk Dashboard at `http://localhost:3000` +- Use your Dashboard API key for authentication +- Extract all APIs and policies +- Save them to the `tyk-sync-data` directory + +### 4. Observe the Dumped Configurations + +Let's examine what was dumped: + +```bash +ls -la tyk-sync-data +``` + +You should see: +- A `.tyk.json` file (index file for synchronization) +- A `policies` directory containing policy definitions +- An `apis` directory containing API definitions + +Each API and policy is stored as a separate JSON file, making it easy to track changes in version control. + +### 5. 
Make Changes and Sync Back + +Now, let's modify an API definition and sync it back to the Dashboard: + +```bash +# Edit one of the API definition files +# For example, change the name of an API +# Then sync the changes back + +# Using Docker +docker run --rm -v $(pwd)/tyk-sync-data:/opt/tyk-sync/data --network=host tykio/tyk-sync:v2.1 update -d="http://localhost:3000" -s="YOUR_DASHBOARD_API_KEY" -p="/opt/tyk-sync/data" + +# Or using the binary directly +tyk-sync update -d="http://localhost:3000" -s="YOUR_DASHBOARD_API_KEY" -p="./tyk-sync-data" +``` + +This will update the API configurations in your Tyk Dashboard based on the local files. + +### 6. Verify Changes in Tyk Demo + +Open your Tyk Dashboard at `http://localhost:3000` and navigate to the APIs section. You should see that your changes have been applied. + +## Conclusion + +Tyk Sync provides a powerful way to manage your API configurations as code. By following this quick start guide, you've learned how to: +- Extract API configurations from a Tyk Dashboard +- Store them as files that can be version-controlled +- Modify and update configurations +- Synchronize configurations between different environments + +This approach helps ensure consistency across environments and enables you to implement GitOps for your API management. \ No newline at end of file diff --git a/api-management/sync/use-cases.mdx b/api-management/sync/use-cases.mdx new file mode 100644 index 000000000..fbee6f793 --- /dev/null +++ b/api-management/sync/use-cases.mdx @@ -0,0 +1,268 @@ +--- +title: "Automate API Configuration Management with Tyk Sync" +description: "Learn how to automate API configuration management using Tyk Sync and GitHub Actions." +keywords: "Tyk Sync, GitHub Actions, API Management, Automations" +sidebarTitle: "Use Cases" +--- + +By integrating GitHub Actions, teams can schedule backups to cloud storage, sync configurations from a Git repository, and update local API definitions directly to the Tyk Dashboard.
These workflows ensure configurations are securely maintained, aligned across environments, and easily managed within the API lifecycle. + +## Backup API Configurations with Github Actions +API platform teams can automate configuration backups using GitHub Actions. By setting up a scheduled GitHub Action, API configurations can be periodically exported and stored in cloud storage, like AWS S3. This approach ensures backups remain up-to-date, offering a reliable way to safeguard data and simplify restoration if needed. + + +### Create a GitHub Action workflow + +1. In your repository, create a new file `.github/workflows/tyk-backup.yml`. +2. Add the following content to the `tyk-backup.yml` file: + +```yaml +name: Tyk Backup + +on: + schedule: + - cron: '0 0 * * *' # Runs every day at midnight + +jobs: + backup: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Create Backup Directory + run: | + BACKUP_DIR="backup/$(date +%Y-%m-%d)" + mkdir -p $BACKUP_DIR + echo "BACKUP_DIR=$BACKUP_DIR" >> $GITHUB_ENV + + - name: Set Permissions for Backup Directory + run: | + sudo chown -R 1001:1001 ${{ github.workspace }}/backup + + - name: Dump API Configurations + run: | + docker run --user 1001:1001 -v ${{ github.workspace }}:/app/data tykio/tyk-sync:${TYK_SYNC_VERSION} dump --target /app/data/${{ env.BACKUP_DIR }} --dashboard ${TYK_DASHBOARD_URL} --secret ${TYK_DASHBOARD_SECRET} + env: + TYK_SYNC_VERSION: ${{ vars.TYK_SYNC_VERSION }} + TYK_DASHBOARD_URL: ${{ secrets.TYK_DASHBOARD_URL }} + TYK_DASHBOARD_SECRET: ${{ secrets.TYK_DASHBOARD_SECRET }} + + - name: Upload to S3 + uses: jakejarvis/s3-sync-action@v0.5.1 + with: + args: --acl private --follow-symlinks --delete + env: + AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_REGION: 'us-east-1' # Change to your region + SOURCE_DIR: ${{ env.BACKUP_DIR }} +``` 
+ +### Set up secrets + +1. Go to your GitHub repository. +2. Navigate to Settings > Secrets and variables > Actions. +3. Add the following variable: + - `TYK_SYNC_VERSION`: The version of Tyk Sync you want to use. +4. Add the following secrets: + - `TYK_DASHBOARD_URL`: The URL of your Tyk Dashboard. + - `TYK_DASHBOARD_SECRET`: The secret key for your Tyk Dashboard. + - `AWS_S3_BUCKET`: The name of your AWS S3 bucket. + - `AWS_ACCESS_KEY_ID`: Your AWS access key ID. + - `AWS_SECRET_ACCESS_KEY`: Your AWS secret access key. + +### Commit and push changes + +Commit the `tyk-backup.yml` file and push it to the main branch of your repository. + +### Verify backups + +The GitHub Action will run every day at midnight, dumping API configurations into a backup directory and uploading them to your specified S3 bucket. + + +## Synchronize API configurations with GitHub Actions +API platform teams can use GitHub Actions to sync API configurations, policies, and templates from a Git repository to Tyk. Triggered by repository changes, the action generates a .tyk.json file and applies updates with the sync command, keeping the Tyk setup aligned with the repository. + +### Setup GitHub repository +Organize your repository with the following structure: + +- `/apis/` for API definition files. +- `/policies/` for security policy files. +- `/assets/` for API template files. + +### Create a GitHub Action workflow + +1. In your repository, create a new file `.github/workflows/tyk-sync.yml`. +2. Add the following content to the `tyk-sync.yml` file: + +```yaml +name: Tyk Sync + +on: + push: + branches: + - main + +jobs: + sync: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Create .tyk.json + run: | + echo '{' > .tyk.json + echo ' "type": "apidef",' >> .tyk.json + echo ' "files": [' >> .tyk.json + find . 
-type f -name '*.json' -path './apis/*' -exec echo ' {"file": "{}"},' \; | sed '$ s/,$//' >> .tyk.json + echo ' ],' >> .tyk.json + echo ' "policies": [' >> .tyk.json + find . -type f -name '*.json' -path './policies/*' -exec echo ' {"file": "{}"},' \; | sed '$ s/,$//' >> .tyk.json + echo ' ],' >> .tyk.json + echo ' "assets": [' >> .tyk.json + find . -type f -name '*.json' -path './assets/*' -exec echo ' {"file": "{}"},' \; | sed '$ s/,$//' >> .tyk.json + echo ' ]' >> .tyk.json + echo '}' >> .tyk.json + cat .tyk.json + + - name: Sync with Tyk + run: | + docker run tykio/tyk-sync:${TYK_SYNC_VERSION} version + docker run -v ${{ github.workspace }}:/app/data tykio/tyk-sync:${TYK_SYNC_VERSION} sync --path /app/data --dashboard ${TYK_DASHBOARD_URL} --secret ${TYK_DASHBOARD_SECRET} + env: + TYK_SYNC_VERSION: ${{ vars.TYK_SYNC_VERSION }} + TYK_DASHBOARD_URL: ${{ secrets.TYK_DASHBOARD_URL }} + TYK_DASHBOARD_SECRET: ${{ secrets.TYK_DASHBOARD_SECRET }} +``` + +### Set up secrets + +1. Go to your GitHub repository. +2. Navigate to Settings > Secrets and variables > Actions. +3. Add the following variable: + - `TYK_SYNC_VERSION`: The version of Tyk Sync you want to use (e.g., v2.0.0). +4. Add the following secrets: + - `TYK_DASHBOARD_URL`: The URL of your Tyk Dashboard. + - `TYK_DASHBOARD_SECRET`: The secret key for your Tyk Dashboard. + +### Commit and push changes + +Commit the `tyk-sync.yml` file and push it to the main branch of your repository. + +### Verify synchronisation + +Each time there is a change in the repository, the GitHub Action will be triggered. It will create the `.tyk.json` file including all JSON files in the repository and use the `sync` command to update the Tyk installation. + + +## Update API Definitions locally +For API developers managing definitions locally, Tyk Sync's publish or update commands can upload local API definitions directly to the Tyk Dashboard, streamlining updates and keeping definitions in sync during development. 
Follow these steps to update your API definitions locally. + +### Prepare your API Definition + +Create your API definition file and save it locally. For example, save it as *api1.json* in a directory structure of your choice. + +### Create a .tyk.json index file + +In the root directory of your API definitions, create a `.tyk.json` file to list all API definition files that Tyk Sync should process. + +Example `.tyk.json`: +```json +{ + "type": "apidef", + "files": [ + { + "file": "api1.json" + } + ] +} +``` + +### Install Tyk Sync via Docker + +If you haven't installed Tyk Sync, you can do so via Docker: + +```bash +docker pull tykio/tyk-sync:v2.0.0 +``` + +### Publish API Definitions to Tyk + +Use the `publish` command to upload your local API definitions to Tyk. Use Docker bind mounts to access your local files. + +```bash +docker run -v /path/to/your/directory:/app/data tykio/tyk-sync:v2.0.0 publish \ + --path /app/data \ + --dashboard [DASHBOARD_URL] \ + --secret [SECRET] +``` + +### Update API Definitions to Tyk + +Similarly, to update existing API definitions, use the update command. + +```bash +docker run -v /path/to/your/directory:/app/data tykio/tyk-sync:v2.0.0 update \ + --path /app/data \ + --dashboard [DASHBOARD_URL] \ + --secret [SECRET] +``` + +### Verify the update + +Log in to your Tyk Dashboard to verify that the API definitions have been published or updated successfully. + + +## Specify Source API Configurations +For the `sync`, `update`, and `publish` commands, you need to specify where Tyk Sync can get the source API configurations to update the target Tyk installation. You can store the source files either in a Git repository or the local file system. + +### Working with Git +For any Tyk Sync command that requires Git repository access, specify the Git repository as the first argument after the command. By default, Tyk Sync reads from the `master` branch. To specify a different branch, use the `--branch` or `-b` flag. 
If the Git repository requires connection using Secure Shell Protocol (SSH), you can specify SSH keys with `--key` or `-k` flag. + +```bash +tyk-sync [command] https://github.com/your-repo --branch develop +``` + +### Working with the local file system +To update API configurations from the local file system, use the `--path` or `-p` flag to specify the source directory for your API configuration files. + +```bash +tyk-sync [command] --path /path/to/local/directory +``` + +### Index File Requirement +A `.tyk.json` index file is required at the root of the source Git repository or the specified path. This `.tyk.json` file lists all the files that should be processed by Tyk Sync. + +Example `.tyk.json`: +```json +{ + "type": "apidef", + "files": [ + { + "file": "api1/api1.json" + }, + { + "file": "api2/api2.json" + }, + { + "file": "api3.json" + } + ], + "policies": [ + { + "file": "policy1.json" + } + ], + "assets": [ + { + "file": "template1.json" + } + ] +} +``` + + diff --git a/api-management/traffic-transformation.mdx b/api-management/traffic-transformation.mdx new file mode 100644 index 000000000..f3f85ce21 --- /dev/null +++ b/api-management/traffic-transformation.mdx @@ -0,0 +1,107 @@ +--- +title: "Transform Traffic by using Tyk Middleware" +description: "Learn how to transform API traffic using Tyk's middleware capabilities." +keywords: "Overview, Allow List, Block List, Ignore Authentication, Internal Endpoint, Request Method , Request Body , Request Headers , Response Body, Response Headers, Request Validation, Mock Response, Virtual Endpoints, Go Templates, JQ Transforms, Request Context Variables" +sidebarTitle: "Overview" +--- + +## Overview + +When you configure an API on Tyk, the Gateway will proxy all requests received at the listen path that you have defined through to the upstream (target) URL configured in the API definition. Responses from the upstream are likewise proxied on to the originating client. 
Requests and responses are processed through a powerful [chain of middleware](/api-management/traffic-transformation#request-middleware-chain) that perform security and processing functions. + +Within that chain are a highly configurable set of optional middleware that can, on a per-endpoint basis: +- apply processing to [API requests](#middleware-applied-to-the-api-request) before they are proxied to the upstream service +- apply customization to the [API response](#middleware-applied-to-the-api-response) prior to it being proxied back to the client + +Tyk also supports a powerful custom plugin feature that enables you to add custom processing at different stages in the processing chains. For more details on custom plugins please see the [dedicated guide](/api-management/plugins/overview#). + +### Middleware applied to the API Request + +The following standard middleware can optionally be applied to API requests on a per-endpoint basis. + +#### Allow list + +The [Allow List](/api-management/traffic-transformation/allow-list) middleware is a feature designed to restrict access to only specific API endpoints. It rejects requests to endpoints not specifically "allowed", returning `HTTP 403 Forbidden`. This enhances the security of the API by preventing unauthorized access to endpoints that are not explicitly permitted. + +Enabling the allow list will cause the entire API to become blocked other than for endpoints that have this middleware enabled. This is great if you wish to have very strict access rules for your services, limiting access to specific published endpoints. + +#### Block list + +The [Block List](/api-management/traffic-transformation/block-list) middleware is a feature designed to prevent access to specific API endpoints. Tyk Gateway rejects all requests made to endpoints with the block list enabled, returning `HTTP 403 Forbidden`. 
+ +#### Cache + +Tyk's [API-level cache](/api-management/response-caching#basic-caching) does not discriminate between endpoints and will usually be configured to cache all safe requests. You can use the granular [Endpoint Cache](/api-management/response-caching#endpoint-caching) to ensure finer control over which API responses are cached by Tyk. + +#### Circuit Breaker + +The [Circuit Breaker](/planning-for-production/ensure-high-availability/circuit-breakers) is a protective mechanism that helps to maintain system stability by preventing repeated failures and overloading of services that are erroring. When a network or service failure occurs, the circuit breaker prevents further calls to that service, allowing the affected service time to recover while ensuring that the overall system remains functional. + +#### Do Not Track Endpoint + +If [traffic logging](/api-management/logs-metrics#api-traffic-logs) is enabled for your Tyk Gateway, then it will create transaction logs for all API requests (and responses) to deployed APIs. You can use the [Do-Not-Track](/api-management/traffic-transformation/do-not-track) middleware to suppress creation of transaction records for specific endpoints. + +#### Enforced Timeout + +Tyk’s [Enforced Timeout](/planning-for-production/ensure-high-availability/enforced-timeouts) middleware can be used to apply a maximum time that the Gateway will wait for a response before it terminates (or times out) the request. This helps to maintain system stability and prevents unresponsive or long-running tasks from affecting the overall performance of the system. + +#### Ignore Authentication + +Adding the [Ignore Authentication](/api-management/traffic-transformation/ignore-authentication) middleware means that Tyk Gateway will not perform authentication checks on requests to that endpoint. This plugin can be very useful if you have a specific endpoint (such as a ping) that you don't need to secure. 
+ +#### Internal Endpoint + +The [Internal Endpoint](/api-management/traffic-transformation/internal-endpoint) middleware instructs Tyk Gateway not to expose the endpoint externally. Tyk Gateway will then ignore external requests to that endpoint while continuing to process internal requests from other APIs; this is often used with the [internal looping](/advanced-configuration/transform-traffic/looping) functionality. + +#### Method Transformation + +The [Method Transformation](/api-management/traffic-transformation/request-method) middleware allows you to change the HTTP method of a request. + +#### Mock Response + +A [Mock Response](/api-management/traffic-transformation/mock-response) is a simulated API response that can be returned by the API gateway without actually sending the request to the backend API. Mock responses are an integral feature for API development, enabling developers to emulate API behavior without the need for upstream execution. + +#### Request Body Transform + +The [Request Body Transform](/api-management/traffic-transformation/request-body) middleware allows you to perform modification to the body (payload) of the API request to ensure that it meets the requirements of your upstream service. + +#### Request Header Transform + +The [Request Header Transform](/api-management/traffic-transformation/request-headers) middleware allows you to modify the header information provided in the request before it leaves the Gateway and is passed to your upstream API. + +#### Request Size Limit + +Tyk Gateway offers a flexible tiered system of limiting request sizes ranging from globally applied limits across all APIs deployed on the gateway down to specific size limits for individual API endpoints. The [Request Size Limit](/api-management/traffic-transformation/request-size-limits) middleware provides the most granular control over request size by enabling you to set different limits for individual endpoints. 
+ +#### Request Validation + +Tyk’s [Request Validation](/api-management/traffic-transformation/request-validation) middleware provides a way to validate the presence, correctness and conformity of HTTP requests to make sure they meet the expected format required by the upstream API endpoints. + +When working with Tyk OAS APIs, the request validation covers both headers and body (payload); with the older Tyk Classic API style we can validate only the request body (payload). + +#### Track Endpoint + +If you do not want to include all endpoints in your [Activity by Endpoint](/api-management/dashboard-configuration#activity-by-endpoint) statistics in Tyk Dashboard, you can enable this middleware for the endpoints to be included. + +#### URL Rewrite + +[URL Rewriting](/transform-traffic/url-rewriting#url-rewrite-middleware) in Tyk is a powerful feature that enables the modification of incoming API request paths to match the expected endpoint format of your backend services. This allows you to translate an outbound API interface to the internal structure of your services. It is a key capability used in [internal looping](/advanced-configuration/transform-traffic/looping). + +#### Virtual Endpoint + +Tyk’s [Virtual Endpoints](/api-management/traffic-transformation/virtual-endpoints) is a programmable middleware component that allows you to perform complex interactions with your upstream service(s) that cannot be handled by one of the other middleware components. + +### Middleware applied to the API Response + +The following transformations can be applied to the response received from the upstream to ensure that it contains the correct data and format expected by your clients. + +#### Response Body Transform + +The [Response Body Transform](/api-management/traffic-transformation/response-body) middleware allows you to perform modification to the body (payload) of the response received from the upstream service to ensure that it meets the expectations of the client. 
+ +#### Response Header Transform + +The [Response Header Transform](/api-management/traffic-transformation/response-headers) middleware allows you to modify the header information provided in the response before it leaves the Gateway and is passed to the client. +### Request Middleware Chain + +Middleware execution flow diff --git a/api-management/traffic-transformation/allow-list.mdx b/api-management/traffic-transformation/allow-list.mdx new file mode 100644 index 000000000..b7ac487c5 --- /dev/null +++ b/api-management/traffic-transformation/allow-list.mdx @@ -0,0 +1,320 @@ +--- +title: "Allow List" +description: "How to configure Allow List traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Allow List" +sidebarTitle: "Allow List" +--- + +## Overview + +The Allow List middleware is a feature designed to restrict access to only specific API endpoints. It rejects requests to endpoints not specifically "allowed", returning `HTTP 403 Forbidden`. This enhances the security of the API by preventing unauthorized access to endpoints that are not explicitly permitted. + +Note that this is not the same as Tyk's [IP allow list](/api-management/gateway-config-tyk-classic#ip-access-control) feature, which is used to restrict access to APIs based upon the IP of the requestor. + +### Use Cases + +#### Restricting access to private endpoints + +If you have a service that exposes endpoints or supports methods that you do not want to be available to clients, you should use the allow list to perform strict restriction to a subset of methods and paths. If the allow list is not enabled, requests to endpoints that are not explicitly defined in Tyk will be proxied to the upstream service and may lead to unexpected behavior. + +### Working + +Tyk Gateway does not actually maintain a list of allowed endpoints but rather works on the model whereby if the *allow list* middleware is added to an endpoint then this will automatically block all other endpoints. 
+ +Tyk Gateway will subsequently return `HTTP 403 Forbidden` to any requested endpoint that doesn't have the *allow list* middleware enabled, even if the endpoint is defined and configured in the API definition. + +
+ + +If you enable the allow list feature by adding the middleware to any endpoint, ensure that you also add the middleware to any other endpoint for which you wish to accept requests. + + + +#### Case sensitivity + +By default the allow list is case-sensitive, so for example if you have defined the endpoint `GET /userID` in your API definition then only calls to `GET /userID` will be allowed: calls to `GET /UserID` or `GET /userid` will be rejected. You can configure the middleware to be case-insensitive at the endpoint level. + +You can also set case sensitivity for the entire [gateway](/tyk-oss-gateway/configuration#ignore_endpoint_case) in the Gateway configuration file `tyk.conf`. If case insensitivity is configured at the gateway level, this will override the endpoint-level setting. + +#### Endpoint parsing + +When using the allow list middleware, we recommend that you familiarize yourself with Tyk's [URL matching](/getting-started/key-concepts/url-matching) options. + +
+ + +Tyk recommends that you use [exact](/getting-started/key-concepts/url-matching#exact-match) matching for maximum security, though prefix and wildcard strategies might also apply for your particular deployment or use case. + + + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Allow List middleware summary + - The Allow List is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Allow List can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +## Using Tyk OAS + + +The [allow list](/api-management/traffic-transformation/allow-list) is a feature designed to restrict access to only specific API endpoints. It rejects requests to endpoints not specifically "allowed", returning `HTTP 403 Forbidden`. This enhances the security of the API by preventing unauthorized access to endpoints that are not explicitly permitted. + +When working with Tyk OAS APIs the middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#allow-list-using-classic) page. + + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The allow list middleware (`allow`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). 
+ +The `allow` object has the following configuration: + +- `enabled`: enable the middleware for the endpoint +- `ignoreCase`: if set to `true` then the path matching will be case insensitive + +For example: + +```json {hl_lines=["47-50", "53-56"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-allow-list", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + }, + "put": { + "operationId": "anythingput", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-allow-list", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-allow-list/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "allow": { + "enabled": true, + "ignoreCase": true + } + }, + "anythingput": { + "allow": { + "enabled": true, + "ignoreCase": true + } + } + } + } + } +} +``` + +In this example the allow list middleware has been configured for requests to the `GET /anything` and `PUT /anything` endpoints. Requests to any other endpoints will be rejected with `HTTP 403 Forbidden`, unless they also have the allow list middleware enabled. +Note that the allow list has been configured to be case insensitive, so calls to `GET /Anything` will be allowed +Note also that the endpoint path has not been terminated with `$`. Requests to, for example, `GET /anything/foobar` will be allowed as the [regular expression pattern match](#endpoint-parsing) will recognize this as `GET /anything`. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the allow list feature. 
+ +### API Designer + +Adding the allow list to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Allow List middleware** + + Select **ADD MIDDLEWARE** and choose the **Allow List** middleware from the *Add Middleware* screen. + + Adding the Allow List middleware + +3. **Optionally configure case-insensitivity** + + If you want to disable case-sensitivity for the allow list, then you must select **EDIT** on the Allow List icon. + + Allow List middleware added to endpoint - click through to edit the config + + This takes you to the middleware configuration screen where you can alter the case sensitivity setting. + Configuring case sensitivity for the Allow List + + Select **UPDATE MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [allow list](/api-management/traffic-transformation/allow-list) is a feature designed to restrict access to only specific API endpoints. It rejects requests to endpoints not specifically "allowed", returning `HTTP 403 Forbidden`. This enhances the security of the API by preventing unauthorized access to endpoints that are not explicitly permitted. + +When working with Tyk Classic APIs the middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#allow-list-using-tyk-oas) page. 
+ +### API Definition + +To enable and configure the allow list you must add a new `white_list` object to the `extended_paths` section of your API definition. + + + +Historically, Tyk followed the out-dated whitelist/blacklist naming convention. We are working to remove this terminology from the product and documentation, however this configuration object currently retains the old name. + + + +The `white_list` object has the following configuration: + +- `path`: the endpoint path +- `method`: this should be blank +- `ignore_case`: if set to `true` then the path matching will be case insensitive +- `method_actions`: a shared object used to configure the [mock response](/api-management/traffic-transformation/mock-response#configuring-mock-response-using-tyk-dashboard-ui) middleware + +The `method_actions` object should be configured as follows, with an entry created for each allowed method on the path: + +- `action`: this should be set to `no_action` +- `code`: this should be set to `200` +- `headers` : this should be blank + +For example: + +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "white_list": [ + { + "disabled": false, + "path": "/status/200", + "method": "", + "ignore_case": false, + "method_actions": { + "GET": { + "action": "no_action", + "code": 200, + "headers": {} + }, + "PUT": { + "action": "no_action", + "code": 200, + "headers": {} + } + } + } + ] + } +} +``` + +In this example the allow list middleware has been configured for HTTP `GET` and `PUT` requests to the `/status/200` endpoint. Requests to any other endpoints will be rejected with `HTTP 403 Forbidden`, unless they also have the allow list middleware enabled. +Note that the allow list has been configured to be case sensitive, so calls to `GET /Status/200` will also be rejected. +Note also that the endpoint path has not been terminated with `$`. 
Requests to, for example, `GET /status/200/foobar` will be allowed as the [regular expression pattern match](#endpoint-parsing) will recognize this as `GET /status/200`. + +Consult section [configuring the Allow List in Tyk Operator](#tyk-operator) for details on how to configure allow lists for endpoints using Tyk Operator. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the allow list middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer**, add an endpoint that matches the path for which you want to allow access. Select the **Whitelist** plugin. + +2. **Configure the allow list** + + Once you have selected the middleware for the endpoint, the only additional feature that you need to configure is whether to make the middleware case insensitive by selecting **Ignore Case**. + + Allowlist options + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the allow list middleware. + +### Tyk Operator + +Similar to the configuration of a Tyk Classic API Definition you must add a new `white_list` object to the `extended_paths` section of your API definition. Furthermore, the `use_extended_paths` configuration parameter should be set to `true`. + + + +Historically, Tyk followed the out-dated whitelist/blacklist naming convention. We are working to remove this terminology from the product and documentation, however this configuration object currently retains the old name. 
+ + + +```yaml {linenos=true,linenostart=1,hl_lines=["26-34"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-whitelist +spec: + name: httpbin-whitelist + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org/ + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + white_list: + - ignore_case: true + method_actions: + GET: + action: "no_action" + code: 200 + data: "" + headers: {} + path: "/get" +``` + +In this example the allow list middleware has been configured for `HTTP GET` requests to the `/get` endpoint. Requests to any other endpoints will be rejected with `HTTP 403 Forbidden`, unless they also have the allow list middleware enabled. Note that the allow list has been configured to be case insensitive, so calls to `GET /Get` will also be accepted. Note also that the endpoint path has not been terminated with `$`. Requests to, for example, `GET /get/foobar` will be allowed as the [regular expression pattern match](#endpoint-parsing) will recognize this as `GET /get`. + + diff --git a/api-management/traffic-transformation/block-list.mdx b/api-management/traffic-transformation/block-list.mdx new file mode 100644 index 000000000..cbfa350e2 --- /dev/null +++ b/api-management/traffic-transformation/block-list.mdx @@ -0,0 +1,309 @@ +--- +title: "Block List" +description: "How to configure Block List traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Block List" +sidebarTitle: "Block List" +--- + +## Overview + +The Block List middleware is a feature designed to block access to specific API endpoints. Tyk Gateway rejects all requests made to endpoints with the block list enabled, returning `HTTP 403 Forbidden`. 
+ +Note that this is not the same as Tyk's [IP block list](/api-management/gateway-config-tyk-classic#ip-access-control) feature, which is used to restrict access to APIs based upon the IP of the requestor. + +### Use Cases + +#### Prevent access to deprecated resources + +If you are versioning your API and deprecating an endpoint then, instead of having to remove the functionality from your upstream service's API you can simply block access to it using the block list middleware. + +### Working + +Tyk Gateway does not actually maintain a list of blocked endpoints but rather works on the model whereby if the *block list* middleware is added to an endpoint then any request to that endpoint will be rejected, returning `HTTP 403 Forbidden`. + +#### Case sensitivity + +By default the block list is case-sensitive, so for example if you have defined the endpoint `GET /userID` in your API definition then only calls to `GET /userID` will be blocked: calls to `GET /UserID` or `GET /userid` will be allowed. You can configure the middleware to be case-insensitive at the endpoint level. + +You can also set case sensitivity for the entire [gateway](/tyk-oss-gateway/configuration#ignore_endpoint_case) in the Gateway configuration file `tyk.conf`. If case insensitivity is configured at the gateway level, this will override the endpoint-level setting. + +#### Endpoint parsing + +When using the block list middleware, we recommend that you familiarize yourself with Tyk's [URL matching](/getting-started/key-concepts/url-matching) options. + +
+ + +Tyk recommends that you use [exact](/getting-started/key-concepts/url-matching#exact-match) matching for maximum security, though prefix and wildcard strategies might also apply for your particular deployment or use case. + + + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Block List middleware summary + - The Block List is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Block List can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + +## Using Tyk OAS + + +The [block list](/api-management/traffic-transformation/block-list) is a feature designed to block access to specific API endpoints. Tyk Gateway rejects all requests made to endpoints with the block list enabled, returning `HTTP 403 Forbidden`. + +When working with Tyk OAS APIs the middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#block-list-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The block list middleware (`block`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). 
+ +The `block` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `ignoreCase`: if set to `true` then the path matching will be case insensitive + +For example: +```json {hl_lines=["47-50", "53-56"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-block-list", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + }, + "put": { + "operationId": "anythingput", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-block-list", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-block-list/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "block": { + "enabled": true, + "ignoreCase": true + } + }, + "anythingput": { + "block": { + "enabled": true, + "ignoreCase": true + } + } + } + } + } +} +``` + +In this example the block list middleware has been configured for requests to the `GET /anything` and `PUT /anything` endpoints. Requests to these endpoints will be rejected with `HTTP 403 Forbidden`. +Note that the block list has been configured to be case insensitive, so calls to `GET /Anything` will also be blocked. +Note also that the endpoint path has not been terminated with `$`. Requests to, for example, `GET /anything/foobar` will be rejected as the [regular expression pattern match](#endpoint-parsing) will recognize this as `GET /anything`. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the block list feature. + +### API Designer + +Adding the block list to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. 
**Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Block List middleware** + + Select **ADD MIDDLEWARE** and choose the **Block List** middleware from the *Add Middleware* screen. + + Adding the Block List middleware + +3. **Optionally configure case-insensitivity** + + If you want to disable case-sensitivity for the block list, then you must select **EDIT** on the Block List icon. + + Block List middleware added to endpoint - click through to edit the config + + This takes you to the middleware configuration screen where you can alter the case sensitivity setting. + Configuring case sensitivity for the Block List + + Select **UPDATE MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [block list](/api-management/traffic-transformation/block-list) is a feature designed to block access to specific API endpoints. Tyk Gateway rejects all requests made to endpoints with the block list enabled, returning `HTTP 403 Forbidden`. + +When working with Tyk Classic APIs the middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#block-list-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the block list in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To enable and configure the block list you must add a new `black_list` object to the `extended_paths` section of your API definition. + + + +Historically, Tyk followed the out-dated whitelist/blacklist naming convention. 
We are working to remove this terminology from the product and documentation, however this configuration object currently retains the old name. + + + +The `black_list` object has the following configuration: +- `path`: the endpoint path +- `method`: this should be blank +- `ignore_case`: if set to `true` then the path matching will be case insensitive +- `method_actions`: a shared object used to configure the [mock response](/api-management/traffic-transformation/mock-response#when-is-it-useful) middleware + +The `method_actions` object should be configured as follows, with an entry created for each blocked method on the path: +- `action`: this should be set to `no_action` +- `code`: this should be set to `200` +- `headers` : this should be blank + +For example: +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "black_list": [ + { + "disabled": false, + "path": "/status/200", + "method": "", + "ignore_case": false, + "method_actions": { + "GET": { + "action": "no_action", + "code": 200, + "headers": {} + }, + "PUT": { + "action": "no_action", + "code": 200, + "headers": {} + } + } + } + ] + } +} +``` + +In this example the block list middleware has been configured for HTTP `GET` and `PUT` requests to the `/status/200` endpoint. Requests to these endpoints will be rejected with `HTTP 403 Forbidden`. +Note that the block list has been configured to be case sensitive, so calls to `GET /Status/200` will not be rejected. +Note also that the endpoint path has not been terminated with `$`. Requests to, for example, `GET /status/200/foobar` will be rejected as the [regular expression pattern match](#endpoint-parsing) will recognize this as `GET /status/200`. + +Consult section [configuring the Block List in Tyk Operator](#tyk-operator) for details on how to configure block lists for endpoints using Tyk Operator. 
+ +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the block list middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to prevent access. Select the **Blacklist** plugin. + +2. **Configure the block list** + + Once you have selected the middleware for the endpoint, the only additional feature that you need to configure is whether to make the middleware case insensitive by selecting **Ignore Case**. + + Blocklist options + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +Similar to the configuration of a Tyk Classic API Definition you must add a new `black_list` object to the `extended_paths` section of your API definition. Furthermore, the `use_extended_paths` configuration parameter should be set to `true`. + + + +Historically, Tyk followed the out-dated whitelist/blacklist naming convention. We are working to remove this terminology from the product and documentation, however this configuration object currently retains the old name. 
+ + + +```yaml {linenos=true, linenostart=1, hl_lines=["26-34"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-blacklist +spec: + name: httpbin-blacklist + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org/ + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + black_list: + - ignore_case: true + method_actions: + GET: + action: "no_action" + code: 200 + data: "" + headers: {} + path: "/get" +``` + +In this example the block list middleware has been configured for HTTP `GET` requests to the `/get` endpoint. Requests to this endpoint will be rejected with `HTTP 403 Forbidden`. +Note that the block list has been configured to be case insensitive, so calls to `GET /Get` will not be rejected. +Note also that the endpoint path has not been terminated with `$`. Requests to, for example, `GET /get/foobar` will be rejected as the [regular expression pattern match](#endpoint-parsing) will recognize this as `GET /get`. + + + diff --git a/api-management/traffic-transformation/do-not-track.mdx b/api-management/traffic-transformation/do-not-track.mdx new file mode 100644 index 000000000..1ea65f8e8 --- /dev/null +++ b/api-management/traffic-transformation/do-not-track.mdx @@ -0,0 +1,264 @@ +--- +title: "Do Not Track" +description: "How to configure Do Not Track traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Do Not Track" +sidebarTitle: "Do Not Track" +--- + +## Overview + + +When [transaction logging](/api-management/logs-metrics#api-traffic-logs) is enabled in the Tyk Gateway, a transaction record will be generated for every request made to an API endpoint deployed on the gateway. 
You can suppress the generation of transaction records for any API by enabling the do-not-track middleware. This provides granular control over request tracking. + +### Use Cases + +#### Compliance and privacy + +Disabling tracking on endpoints that handle personal or sensitive information is crucial for adhering to privacy laws such as GDPR or HIPAA. This action prevents the storage and logging of sensitive data, ensuring compliance and safeguarding user privacy. + +#### Optimizing performance + +For endpoints experiencing high traffic, disabling tracking can mitigate the impact on the analytics processing pipeline and storage systems. Disabling tracking on endpoints used primarily for health checks or load balancing can prevent the analytics data from being cluttered with information that offers little insight. These optimizations help to maintain system responsiveness and efficiency by reducing unnecessary data load and help to ensure that analytics efforts are concentrated on more meaningful data. + +#### Cost Management + +In scenarios where analytics data storage and processing incur significant costs, particularly in cloud-based deployments, disabling tracking for non-essential endpoints can be a cost-effective strategy. This approach allows for focusing resources on capturing valuable data from critical endpoints. + +### Working + +When transaction logging is enabled, the gateway will automatically generate a transaction record for every request made to deployed APIs. + +You can enable the do-not-track middleware on whichever endpoints for which you do not want to generate logs. This will instruct the Gateway not to generate any transaction records for those endpoints or APIs. As no record of these transactions will be generated by the Gateway, there will be nothing created in Redis and hence nothing for the pumps to transfer to the persistent storage and these endpoints will not show traffic in the Dashboard's analytics screens. 
+ + + +When working with Tyk Classic APIs, you can disable tracking at the API or endpoint-level. When working with Tyk OAS APIs, you can currently disable tracking only at the more granular endpoint-level. + + + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Do-Not-Track middleware summary + - The Do-Not-Track middleware is an optional stage in Tyk's API Request processing chain sitting between the [TBC]() and [TBC]() middleware. + - The Do-Not-Track middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +## Using Tyk OAS + + +The [Do-Not-Track](#do-not-track-overview) middleware provides the facility to disable generation of transaction records (which are used to track requests to your APIs). When working with Tyk OAS APIs, you can currently disable tracking only at the endpoint-level. + +When working with Tyk OAS APIs the middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation) either manually within the `.json` file or from the API Designer in the Tyk Dashboard. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#do-not-track-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The do-not-track middleware (`doNotTrackEndpoint`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). 
+ +The `doNotTrackEndpoint` object has the following configuration: +- `enabled`: enable the middleware for the endpoint + +For example: +```json {hl_lines=["39-41"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-do-not-track", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-do-not-track", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-do-not-track/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "doNotTrackEndpoint": { + "enabled": true + } + } + } + } + } +} +``` + +In this example the do-not-track middleware has been configured for requests to the `GET /anything` endpoint. Any such calls will not generate transaction records from the Gateway and so will not appear in the analytics. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the do-not-track middleware. + +### API Designer + +Adding do-not-track to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Do Not Track Endpoint middleware** + + Select **ADD MIDDLEWARE** and choose the **Do Not Track Endpoint** middleware from the *Add Middleware* screen. + + Adding the Do Not Track middleware + +3. **Save the API** + + Select **SAVE API** to apply the changes to your API. 
+ +## Using Classic + + +The [Do-Not-Track](#do-not-track-overview) middleware provides the facility to disable generation of transaction records (which are used to track requests) at the API or endpoint level. + +When working with Tyk Classic APIs the middleware is configured in the Tyk Classic API Definition either manually within the `.json` file or from the API Designer in the Tyk Dashboard. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](/api-management/traffic-transformation/do-not-track#do-not-track-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +You can prevent tracking for all endpoints of an API by configuring the `do_not_track` field in the root of your API definition. +- `true`: no transaction logs will be generated for requests to the API +- `false`: transaction logs will be generated for requests to the API + +If you want to be more granular and disable tracking only for selected endpoints, then you must add a new `do_not_track_endpoints` object to the `extended_paths` section of your API definition. + +The `do_not_track_endpoints` object has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint HTTP method + +The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +For example: +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "do_not_track_endpoints": [ + { + "disabled": false, + "path": "/anything", + "method": "GET" + } + ] + } +} +``` + +In this example the do-not-track middleware has been configured for requests to the `GET /anything` endpoint. 
Any such calls will not generate transaction records from the Gateway and so will not appear in the analytics. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the per-endpoint Do-Not-Track middleware for your Tyk Classic API by following these steps. Note that the API-level middleware can only be configured from the Raw Definition screen. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you do not want to generate records. Select the **Do not track endpoint** plugin. + + Select the middleware + +2. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring the middleware in Tyk Operator is similar to that explained in configuring the middleware in the Tyk Classic API Definition. + +It is possible to prevent tracking for all endpoints of an API by configuring the `do_not_track` field in the root of your API definition as follows: + +- `true`: no transaction logs will be generated for requests to the API +- `false`: transaction logs will be generated for requests to the API + +```yaml {linenos=true, linenostart=1, hl_lines=["10"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-do-not-track +spec: + name: httpbin-do-not-track + use_keyless: true + protocol: http + active: true + do_not_track: true + proxy: + target_url: http://example.com + listen_path: /example + strip_listen_path: true +``` + +If you want to disable tracking only for selected endpoints, then the process is similar to that defined in configuring the middleware in the Tyk Classic API Definition, i.e. you must add a new `do_not_track_endpoints` list to the extended_paths section of your API definition. 
+This should contain a list of objects representing each endpoint `path` and `method` that should have tracking disabled: + +```yaml {linenos=true, linenostart=1, hl_lines=["31-33"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-endpoint-tracking +spec: + name: httpbin - Endpoint Track + use_keyless: true + protocol: http + active: true + do_not_track: false + proxy: + target_url: http://httpbin.org/ + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + track_endpoints: + - method: GET + path: "/get" + do_not_track_endpoints: + - method: GET + path: "/headers" +``` + +In the example above we can see that the `do_not_track_endpoints` list is configured so that requests to `GET /headers` will have tracking disabled. + diff --git a/api-management/traffic-transformation/go-templates.mdx b/api-management/traffic-transformation/go-templates.mdx new file mode 100644 index 000000000..d6738d2ba --- /dev/null +++ b/api-management/traffic-transformation/go-templates.mdx @@ -0,0 +1,229 @@ +--- +title: "Go Templates" +description: "How to configure Go Templates traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Go Templates" +sidebarTitle: "Go Templates" +--- + +Tyk's [request](/api-management/traffic-transformation/request-body) and [response](/api-management/traffic-transformation/response-body) body transform middleware use the [Go template language](https://golang.org/pkg/text/template/) to parse and modify the provided input. + +Go templates are also used by Tyk's [webhook event handler](/api-management/gateway-events#event-handling-with-webhooks) to produce the payload for the HTTP request sent to the target system. 
+ +In this section of the documentation, we provide some guidance and a few examples on the use of Go templating with Tyk. + +## Data format conversion using helper functions + +Tyk provides two helper functions to assist with data format translation between JSON and XML: +- `jsonMarshal` performs JSON style character escaping on an XML field and, for complex objects, serialises them to a JSON string ([example](#xml-to-json-conversion-using-jsonmarshal)) +- `xmlMarshal` performs the equivalent conversion from JSON to XML ([example](#json-to-xml-conversion-using-xmlmarshal)) + +When creating these functions within your Go templates, please note: +- the use of `.` in the template refers to the entire input, whereas something like `.myField` refers to just the `myField` field of the input +- the pipe `|` joins together the things either side of it, which is typically input data on the left and a receiving command to process the data on the right, such as `jsonMarshal` + +Hence `{{ . | jsonMarshal }}` will pass the entire input to the `jsonMarshal` helper function. + +## Using functions within Go templates + +You can define and use functions in the Go templates that are used for body transforms in Tyk. Functions allow you to abstract common template logic for cleaner code and to aid reusability. Breaking the template into functions improves readability of more complex templates. + +Here is an example where we define a function called `myFunction` that accepts one parameter: +```go +{{- define "myFunction" }} + Hello {{.}}! +{{- end}} +``` + +We can call that function and pass "world" as the parameter: +```go +{ + "message": {{template "myFunction" "world"}} +} +``` + +The output would be: +```json +{ + "message": "Hello world!" +} +``` + +We have bundled the [Sprig Library (v3)](http://masterminds.github.io/sprig/) which provides over 70 pre-written functions for transformations to assist the creation of powerful Go templates to transform your API requests. 
+ +## Additional resources + +Here's a useful [blog post](https://blog.gopheracademy.com/advent-2017/using-go-templates/) and [YouTube tutorial](https://www.youtube.com/watch?v=k5wJv4XO7a0) that can help you to learn about using Go templates. + +## Go templating examples +Here we provide worked examples for both [JSON](#example-json-transformation-template) and [XML](#example-xml-transformation-template) formatted inputs. We also explain examples using the [jsonMarshal](#xml-to-json-conversion-using-jsonmarshal) and [xmlMarshal](#json-to-xml-conversion-using-xmlmarshal) helper functions. + +### Example JSON transformation template +Imagine you have a published API that accepts the request listed below, but your upstream service requires a few alterations, namely: +- swapping the values of parameters `value1` and `value2` +- renaming the `value_list` to `transformed_list` +- adding a `user-id` extracted from the session metadata +- adding a `client-ip` logging the client IP +- adding a `req-type` that logs the value provided in query parameter `type` + +**Input** +- Session metadata `uid` = `user123` +- IP address of calling client = `192.0.0.1` +- Query parameter `type` = `strict` +```json +{ + "value1": "value-1", + "value2": "value-2", + "value_list": [ + "one", + "two", + "three" + ] +} +``` + +**Template** +```go +{ + "value1": "{{.value2}}", + "value2": "{{.value1}}", + "transformed_list": [ + {{range $index, $element := index . 
"value_list"}} + {{if $index}}, {{end}} + "{{$element}}" + {{end}} + ], + "user-id": "{{._tyk_meta.uid}}", + "user-ip": "{{._tyk_context.remote_addr}}", + "req-type": "{{ ._tyk_context.request_data.param.type }}" +} +``` +In this template: +- `.value1` accesses the "value1" field of the input JSON +- we swap value1 and value2 +- we use the range function to loop through the "value_list" array +- `._tyk_meta.uid` injects the "uid" session metadata value +- `._tyk_context.remote_addr` injects the client IP address from the context +- `._tyk_context.request_data.param.type` injects query parameter "type" + +**Output** +``` .json +{ + "value1": "value-2", + "value2": "value-1", + "transformed_list": [ + "one", + "two", + "three" + ], + "user-id": "user123" + "user-ip": "192.0.0.1" + "req-type": "strict" +} +``` + +### Example XML transformation template +XML cannot be as easily decoded into strict structures as JSON, so the syntax is a little different when working with an XML document. Here we are performing the reverse translation, starting with XML and converting to JSON. 
+ +**Input** +- Session metadata `uid` = `user123` +- IP address of calling client = `192.0.0.1` +- Query parameter `type` = `strict` +```xml + + + + value-1 + value-2 + + one + two + three + + + +``` + +**Template** +``` .xml + + + + {{ .data.body.value2 }} + {{ .data.body.value1 }} + + {{range $index, $element := .data.body.valueList.item }} + {{$element}} + {{end}} + + {{ ._tyk_meta.uid }} + {{ ._tyk_context.remote_addr }} + {{ ._tyk_context.request_data.param.type }} + + +``` +In this template: +- `.data.body.value1` accesses the "value1" field of the input XML +- we swap value1 and value2 +- we use the range function to loop through the "value_list" array +- `._tyk_meta.uid` injects the "uid" session metadata value +- `._tyk_context.remote_addr` injects the client IP address from the context +- `._tyk_context.request_data.param.type` injects query parameter "type" + +**Output** +``` .xml + + + + value-2 + value-1 + + one + two + three + + user123 + 192.0.0.1 + strict + + +``` + +### XML to JSON conversion using jsonMarshal +The `jsonMarshal` function converts XML formatted input into JSON, for example: + +**Input** +```xml +world +``` + +**Template** +```go +{{ . | jsonMarshal }} +``` + +**Output** +```json +{"hello":"world"} +``` + +Note that in this example, Go will step through the entire data structure provided to the template. When used in the [Request](/api-management/traffic-transformation/request-body#data-accessible-to-the-middleware) or [Response](/api-management/traffic-transformation/request-body#data-accessible-to-the-middleware) Body Transform middleware, this would include Context Variables and Session Metadata if provided to the middleware. + +### JSON to XML conversion using xmlMarshal +The `xmlMarshal` function converts JSON formatted input into XML, for example: + +**Input** +```json +{"hello":"world"} +``` +**Template** +``` .go +{{ . 
| xmlMarshal }} +``` + +**Output** +```xml +world +``` + +Note that in this example, Go will step through the entire data structure provided to the template. When used in the [Request](/api-management/traffic-transformation/request-body#data-accessible-to-the-middleware) or [Response](/api-management/traffic-transformation/request-body#data-accessible-to-the-middleware) Body Transform middleware, this would include Context Variables and Session Metadata if provided to the middleware. + diff --git a/api-management/traffic-transformation/ignore-authentication.mdx b/api-management/traffic-transformation/ignore-authentication.mdx new file mode 100644 index 000000000..7b1ae5e72 --- /dev/null +++ b/api-management/traffic-transformation/ignore-authentication.mdx @@ -0,0 +1,301 @@ +--- +title: "Ignore Authentication" +description: "How to configure Ignore Authentication traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Ignore Authentication" +sidebarTitle: "Ignore Authentication" +--- + +## Overview + + +The Ignore Authentication middleware instructs Tyk Gateway to skip the authentication step for calls to an endpoint, even if authentication is enabled for the API. + +### Use Cases + +#### Health and liveness endpoints + +This plugin can be very useful if you have an endpoint (such as a ping or health check) that you don’t need to secure. + +### Working + +When the Ignore Authentication middleware is configured for a specific endpoint, it instructs the gateway to bypass the client authentication process for requests made to that endpoint. If other (non-authentication) middleware are configured for the endpoint, they will still execute on the request. + +It is important to exercise caution when using the Ignore Authentication middleware, as it effectively disables Tyk's security features for the ignored paths. 
Only endpoints that are designed to be public or have independent security mechanisms should be configured to bypass authentication in this way. When combining Ignore Authentication with response transformations be careful not to inadvertently expose sensitive data or rely on authentication or session data that is not present. + +#### Case sensitivity + +By default the ignore authentication middleware is case-sensitive. If, for example, you have defined the endpoint `GET /ping` in your API definition then only calls to `GET /ping` will ignore the authentication step: calls to `GET /Ping` or `GET /PING` will require authentication. You can configure the middleware to be case insensitive at the endpoint level. + +You can also set case sensitivity for the entire Tyk Gateway in its [configuration file](/tyk-oss-gateway/configuration#ignore_endpoint_case) `tyk.conf`. If case insensitivity is configured at the gateway level, this will override the endpoint-level setting. + +#### Endpoint parsing + +When using the ignore authentication middleware, we recommend that you familiarize yourself with Tyk's [URL matching](/getting-started/key-concepts/url-matching) options. + +
+ + +Tyk recommends that you use [exact](/getting-started/key-concepts/url-matching#exact-match) matching for maximum security, though prefix and wildcard strategies might also apply for your particular deployment or use case. + + + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Ignore Authentication middleware summary + - The Ignore Authentication middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Ignore Authentication middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +## Using Tyk OAS + + +The [Ignore Authentication](/api-management/traffic-transformation/ignore-authentication) middleware instructs Tyk Gateway to skip the authentication step for calls to an endpoint, even if authentication is enabled for the API. + +When working with Tyk OAS APIs the middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#ignore-authentication-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The ignore authentication middleware (`ignoreAuthentication`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). 
+ +The `ignoreAuthentication` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `ignoreCase`: if set to `true` then the path matching will be case insensitive + +For example: +```json {hl_lines=["65-69"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-ignore-authentication", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://localhost:8181/example-ignore-authentication/" + } + ], + "security": [ + { + "authToken": [] + } + ], + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "components": { + "securitySchemes": { + "authToken": { + "type": "apiKey", + "in": "header", + "name": "Authorization" + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-ignore-authentication", + "state": { + "active": true, + "internal": false + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "authentication": { + "enabled": true, + "securitySchemes": { + "authToken": { + "enabled": true + } + } + }, + "listenPath": { + "strip": true, + "value": "/example-ignore-authentication/" + } + }, + "middleware": { + "operations": { + "anythingget": { + "ignoreAuthentication": { + "enabled": true + } + } + } + } + } +} +``` + +In this example the ignore authentication middleware has been configured for requests to the `GET /anything` endpoint. Any such calls will skip the authentication step in the Tyk Gateway's processing chain. +- the middleware has been configured to be case sensitive, so calls to `GET /Anything` will not skip authentication + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the Ignore Authentication middleware. 
+ +### API Designer + +Adding and configuring the Ignore Authentication middleware to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Ignore Authentication middleware** + + Select **ADD MIDDLEWARE** and choose the **Ignore Authentication** middleware from the *Add Middleware* screen. + + Adding the Ignore Authentication middleware + +3. **Optionally configure case-insensitivity** + + If you want to disable case-sensitivity for the path that you wish to skip authentication, then you must select **EDIT** on the Ignore Authentication icon. + + Ignore Authentication middleware added to endpoint - click through to edit the config + + This takes you to the middleware configuration screen where you can alter the case sensitivity setting. + Configuring case sensitivity for the path for which to ignore authentication + + Select **UPDATE MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [Ignore Authentication](/api-management/traffic-transformation/ignore-authentication) middleware instructs Tyk Gateway to skip the authentication step for calls to an endpoint, even if authentication is enabled for the API. + +When working with Tyk Classic APIs the middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#ignore-authentication-using-tyk-oas) page. 
+ +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To enable the middleware you must add a new `ignored` object to the `extended_paths` section of your API definition. + +The `ignored` object has the following configuration: +- `path`: the endpoint path +- `method`: this should be blank +- `ignore_case`: if set to `true` then the path matching will be case insensitive +- `method_actions`: a shared object used to configure the [mock response](/api-management/traffic-transformation/mock-response#when-is-it-useful) middleware + +The `method_actions` object should be configured as follows, with an entry created for each allowed method on the path: +- `action`: this should be set to `no_action` +- `code`: this should be set to `200` +- `headers` : this should be blank + +For example: +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "ignored": [ + { + "disabled": false, + "path": "/status/200", + "method": "", + "ignore_case": false, + "method_actions": { + "GET": { + "action": "no_action", + "code": 200, + "headers": {} + } + } + } + ] + } +} +``` + +In this example the ignore authentication middleware has been configured for requests to the `GET /status/200` endpoint. Any such calls will skip the authentication step in the Tyk Gateway's processing chain. +- the middleware has been configured to be case sensitive, so calls to `GET /Status/200` will not skip authentication + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the Ignore Authentication middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to ignore authentication. Select the **Ignore** plugin. + + Adding the ignore authentication middleware to a Tyk Classic API endpoint + +2. 
**Configure the middleware** + + Once you have selected the Ignore middleware for the endpoint, the only additional feature that you need to configure is whether to make it case-insensitive by selecting **Ignore Case**. + + Ignore options + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring the middleware in Tyk Operator is similar to that explained in configuring the middleware in the Tyk Classic API Definition. It is possible to configure the ignore authentication middleware using the `ignored` object within the `extended_paths` section of the API Definition. + +In the example below the ignore authentication middleware has been configured for requests to the `GET /get` endpoint. Any such calls will skip the authentication step in the Tyk Gateway's processing chain. +- the middleware has been configured to be case insensitive, so calls to `GET /Get` will also skip authentication + +```yaml {linenos=true, linenostart=1, hl_lines=["27-35"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-ignored +spec: + name: httpbin-ignored + use_keyless: false + use_standard_auth: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org/ + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + ignored: + - ignore_case: true + method_actions: + GET: + action: "no_action" + code: 200 + data: "" + headers: {} + path: "/get" +``` \ No newline at end of file diff --git a/api-management/traffic-transformation/internal-endpoint.mdx b/api-management/traffic-transformation/internal-endpoint.mdx new file mode 100644 index 000000000..d056e494c --- /dev/null +++ b/api-management/traffic-transformation/internal-endpoint.mdx @@ -0,0 +1,270 @@ +--- 
+title: "Internal Endpoint" +description: "How to configure Internal Endpoint traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Internal Endpoint" +sidebarTitle: "Internal Endpoint" +--- + +## Overview + + +The Internal Endpoint middleware instructs Tyk Gateway to ignore external requests to the endpoint (which is a combination of HTTP method and path). Internal requests from other APIs will be processed. + +### Use Cases + +#### Internal routing decisions + +Internal endpoints are frequently used to make complex routing decisions that cannot be handled by the standard routing features. A single externally published endpoint can receive requests and then, based on inspection of the requests, the [URL rewrite](/transform-traffic/url-rewriting#url-rewrite-middleware) middleware can route them to different internal endpoints and on to the appropriate upstream services. + +### Working + +When the Internal Endpoint middleware is configured for a specific endpoint, it instructs the Gateway to ignore requests to the endpoint that originate from outside Tyk. + +An internal endpoint can be targeted from another API deployed on Tyk using the `tyk://` prefix instead of `http://`. + +For example, if `GET /status/200` is configured to be an Internal Endpoint on the listen path `http://my-tyk-install.org/my-api/` then external calls to this endpoint will be rejected with `HTTP 403 Forbidden`. Other APIs on Tyk will be able to direct traffic to this endpoint by setting their `target_url` to `tyk://my-api/status/200`. + +#### Addressing an internal endpoint + +An internal endpoint can be addressed using three different identifiers in the format `tyk://{identifier}/{endpoint}`. 
+ +The options for the `identifier` are: +- `self` (only if the endpoint is in the same API) +- `api_id` (the unique API Identifier assigned to the API within Tyk) +- listen path (the listen path defined for the API) + +For example, let's say you have two APIs: + +| api_id | listen path | Endpoint 1 | Endpoint 2 (with internal endpoint middleware) | +| :-------- | :------------- | :-------------- | :------------------------------------------------ | +| f1c63fa5177de2719 | `/api1` | `endpoint1_ext` | `endpoint1_int` | +| 2e90b33a879945918 | `/api2` | `endpoint2_ext` | `endpoint2_int` | + +An external request directed at `/api1/endpoint1_int` will be rejected with `HTTP 403 Forbidden`, since this is an internal endpoint. + +This endpoint could, however, be called from within either endpoint in `/api2` as either: +- `tyk://api1/endpoint1_int` +- `tyk://f1c63fa5177de2719/endpoint1_int` + +Or from within `/api1/endpoint1_ext` as: +- `tyk://api1/endpoint1_int` +- `tyk://f1c63fa5177de2719/endpoint1_int` +- `tyk://self/endpoint1_int` + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Internal Endpoint middleware summary + - The Internal Endpoint middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Internal Endpoint middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + + +## Using Tyk OAS + + +The [Internal Endpoint](#internal-endpoint-overview) middleware instructs Tyk Gateway not to process external requests to the endpoint (which is a combination of HTTP method and path). Internal requests from other APIs will be processed. + +When working with Tyk OAS APIs, the middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#internal-endpoint-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The internal endpoint middleware (`internal`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). 
+ +The `internal` object has the following configuration: +- `enabled`: enable the middleware for the endpoint + +For example: +```json {hl_lines=["49-50"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-internal-endpoint", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + }, + "/redirect": { + "get": { + "operationId": "redirectget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-internal-endpoint", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-internal-endpoint/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "internal": { + "enabled": true + } + }, + "redirectget": { + "urlRewrite": { + "enabled": true, + "pattern": ".*", + "rewriteTo": "tyk://self/anything" + } + } + } + } + } +} +``` + +In this example, two endpoints have been defined: +- the internal endpoint middleware has been configured for requests to the `GET /anything` endpoint +- the [URL rewrite](/transform-traffic/url-rewriting#url-rewrite-middleware) middleware has been configured for requests to the `GET /redirect` endpoint + +Any calls made directly to `GET /example-internal-endpoint/anything` will be rejected, with Tyk returning `HTTP 403 Forbidden`, since the `/anything` endpoint is internal. + +Any calls made to `GET /example-internal-endpoint/redirect` will be redirected to `GET /example-internal-endpoint/anything`. These will be proxied to the upstream because they originate from within Tyk Gateway (i.e. they are internal requests) - so the response from `GET http://httpbin.org/anything` will be returned. 
+ +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the internal endpoint middleware. + +### API Designer + +Adding the Internal Endpoint middleware to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Internal Endpoint middleware** + + Select **ADD MIDDLEWARE** and choose the **Internal** middleware from the *Add Middleware* screen. + + Adding the Internal Endpoint middleware + +3. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [Internal Endpoint](#internal-endpoint-overview) middleware instructs Tyk Gateway not to process external requests to the endpoint (which is a combination of HTTP method and path). Internal requests from other APIs will be processed. + +When working with Tyk Classic APIs, the middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#internal-endpoint-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To enable the middleware you must add a new `internal` object to the `extended_paths` section of your API definition. 
+ +The `internal` object has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint HTTP method + +For example: +```.json {linenos=true, linenostart=1} +{ + "extended_paths": { + "internal": [ + { + "disabled": false, + "path": "/status/200", + "method": "GET" + } + ] + } +} +``` + +In this example the internal endpoint middleware has been configured for HTTP `GET` requests to the `/status/200` endpoint. Any requests made to this endpoint that originate externally to Tyk will be rejected with `HTTP 403 Forbidden`. Conversely, the endpoint can be reached internally by another API at `tyk:///status/200`. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the internal endpoint middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path that you wish to set as internal. Select the **Internal** plugin. + + Adding the internal endpoint middleware to a Tyk Classic API endpoint + +2. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring the middleware in Tyk Operator is similar to that explained in configuring the middleware in the Tyk Classic API Definition. The middleware can be configured by adding a new `internal` object to the `extended_paths` section of your API definition. + +In the example below the internal endpoint middleware has been configured for HTTP `GET` requests to the `/status/200` endpoint. Any requests made to this endpoint that originate externally to Tyk will be rejected with `HTTP 403 Forbidden`. Conversely, the endpoint can be reached internally by another API at `tyk:///status/200`. 
+```yaml {linenos=true, linenostart=1, hl_lines=["26-28"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-endpoint-internal +spec: + name: httpbin - Endpoint Internal + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org/ + listen_path: /httpbin-internal + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + internal: + - path: /status/200 + method: GET +``` + + + diff --git a/api-management/traffic-transformation/jq-transforms.mdx b/api-management/traffic-transformation/jq-transforms.mdx new file mode 100644 index 000000000..8d3e4e97e --- /dev/null +++ b/api-management/traffic-transformation/jq-transforms.mdx @@ -0,0 +1,65 @@ +--- +title: "JQ Transforms" +description: "How to configure JQ Transforms traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, JQ Transforms" +sidebarTitle: "JQ Transforms" +--- + + + +This feature is experimental and can be used only if you compile Tyk yourself using the `jq` tag: `go build --tags 'jq'` + + + + +If you work with JSON you are probably aware of the popular `jq` command line JSON processor. For more details, see https://stedolan.github.io/jq/. + +Now you can use the full power of its queries and transformations to transform requests, responses, headers and even context variables. + +We have added two new plugins: + +* `transform_jq` - for request transforms. +* `transform_jq_response` - for response transforms + +Both have the same structure, similar to the rest of our plugins: +`{ "path": "", "method": "", "filter": "" }` + +## Request Transforms +Inside a request transform you can use the following variables: + +* `.body` - your current request body +* `._tyk_context` - Tyk context variables. You can use it to access request headers as well. 
+ +Your JQ request transform should return an object in the following format: +`{ "body": , "rewrite_headers": , "tyk_context": }`. + +`body` is required, while `rewrite_headers` and `tyk_context` are optional. + +## Response Transforms +Inside a response transform you can use following variables: + +* `.body` - your current response body +* `._tyk_context` - Tyk context variables. You can use it to access request headers as well. +* `._tyk_response_headers` - Access to response headers + +Your JQ response transform should return an object in the following format: +`{ "body": , "rewrite_headers": }`. + +`body` is required, while `rewrite_headers` is optional. + +## Example +```{.json} +"extended_paths": { + "transform_jq": [{ + "path": "/post", + "method": "POST", + "filter": "{\"body\": (.body + {\"TRANSFORMED-REQUEST-BY-JQ\": true, path: ._tyk_context.path, user_agent: ._tyk_context.headers_User_Agent}), \"rewrite_headers\": {\"X-added-rewrite-headers\": \"test\"}, \"tyk_context\": {\"m2m_origin\": \"CSE3219/C9886\", \"deviceid\": .body.DEVICEID}}" + }], + "transform_jq_response": [{ + "path": "/post", + "method": "POST", + "filter": "{\"body\": (.body + {\"TRANSFORMED-RESPONSE-BY-JQ\": true, \"HEADERS-OF-RESPONSE\": ._tyk_response_headers}), \"rewrite_headers\": {\"JQ-Response-header\": .body.origin}}" + }] +} +``` + diff --git a/api-management/traffic-transformation/mock-response.mdx b/api-management/traffic-transformation/mock-response.mdx new file mode 100644 index 000000000..74df59b9b --- /dev/null +++ b/api-management/traffic-transformation/mock-response.mdx @@ -0,0 +1,802 @@ +--- +title: "Mock Response" +description: "How to configure Mock Response traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Mock Response" +sidebarTitle: "Mock Response" +--- + +## Overview + +A mock response is a simulated API response that can be returned by the API gateway without actually sending the request to the backend API service. 
Mock responses are an integral feature for API development, enabling developers to emulate API behavior without the need for upstream execution. + +Tyk's mock response middleware offers this functionality by allowing the configuration of custom responses for designated endpoints. This capability is crucial for facilitating front-end development, conducting thorough testing, and managing unexpected scenarios or failures. + +### When is it useful + +#### Rapid API Prototyping + +Developers can create predefined static (mock) responses during early API prototyping phases to simulate responses without any backend. This is useful for several reasons: + +- **Validate API Design Early**: It enables [trying an API before writing any code](https://tyk.io/blog/3-ways-to-try-out-your-api-design-before-you-build). API designers and product managers can validate concepts without waiting for full API implementations. +- **Enable Dependent Development**: Allows development of apps and tooling that depend on the upstream service to progress. For example, the front-end team can build their interface based on the mocked responses. +- **Support Test-Driven Development (TDD) and Behavior-Driven Development (BDD)**: Supports writing test cases for the API before implementation, enabling design and testing of the API without writing any backend code. + +#### Legacy System Migration + +When migrating from a legacy system to new APIs, mock responses can be used to emulate the old system's outputs during the transitional phases. This provides continuity for client apps relying on the old system while new APIs are built that will eventually replace the legacy hooks. + +#### Disaster Recovery Testing + +The ability for a gateway to return well-formed mocks when backend APIs are unavailable helps test disaster recovery plans. 
By intentionally taking APIs offline and verifying the mocks' surface instead, developers gain confidence in the gateway's fallback and business continuity capabilities + +#### Enhanced CI/CD pipeline + +Test cases that rely on API interactions can mock those dependencies and provide virtual test data. This removes wait times for real API calls to complete during automated builds. Consumer testing can verify that provider APIs meet expected contracts using mocks in the CI pipeline. This ensures the contract remains intact across code changes before deployment. Front-end/client code can scale release cycles faster than backend APIs by using mocks to simulate planned API behaviors before they are ready. + +### Working + +When the Mock Response middleware is configured for a specific endpoint, it terminates requests to the endpoint and generates an HTTP response that will be returned to the client as if it had come from the upstream service. No request will be passed to the upstream. The mock response to an API request should be designed to emulate how the service would respond to requests. To enable this, the content of the response can be configured to match the API contract for the service: you can set the HTTP status code, body and headers for the response. + +### Advanced mock responses with Tyk OAS + +When working with Tyk OAS APIs, Tyk Gateway can parse the [examples and schema](/api-management/traffic-transformation/mock-response#mock-responses-using-openapi-metadata) in the OpenAPI description and use this to automatically generate responses using those examples. Where multiple examples are defined, for example for different response codes, Tyk enables you to [configure special headers](#multiple-mock-responses-for-a-single-endpoint) in the request to select the desired mock response. 
+ +### Middleware execution order during request processing + +#### With **Tyk OAS APIs** + +- the mock response middleware is executed at the **end** of the request processing chain, immediately prior to the request being proxied to the upstream +- all other request processing middleware (e.g. authentication, request transforms) will be executed prior to the mock response. + +#### With **Tyk Classic APIs** + +- the mock response middleware is executed at the **start** of the request processing chain +- an endpoint with a mock response will not run any other middleware and will immediately return the mocked response for any request. As such, it is always an unauthenticated (keyless) call. + +
+ +If you’re using Tyk OAS APIs, then you can find details and examples of how to configure the mock response middleware [here](#mock-response-using-tyk-oas). + +If you’re using Tyk Classic APIs, then you can find details and examples of how to configure the mock response middleware [here](#mock-response-using-classic). + +{/* proposed "summary box" to be shown graphically on each middleware page + ### Mock Response middleware summary + - The Mock Response middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Mock Response middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + + +## Mock Responses using OpenAPI Metadata + +The [OpenAPI Specification](https://learn.openapis.org/specification/docs.html#adding-examples) provides metadata that can be used by automatic documentation generators to create comprehensive reference guides for APIs. Most objects in the specification include a `description` field that offers additional human-readable information for documentation. Alongside descriptions, some OpenAPI objects can include sample values in the OpenAPI Document, enhancing the generated documentation by providing representative content that the upstream service might return in responses. + +Tyk leverages examples from your API documentation (in OpenAPI Spec format) to generate mock responses for the API exposed via the gateway. Based on this data, Tyk adds a new middleware named "Mock Response" and returns various mock responses according to your spec. Refer to the [Mock configuration guide](#automatic-configuration-inferred-from-your-openapi-document) to learn how to do this. + +The specification provides three methods for Tyk to deduce the mock response: `example`, `examples` and `schema`. +1. 
`example`: A sample value that could be returned in a specific field in a response (see [below](#using-example-to-generate-a-mock-response)) +2. `examples`: A map pairing an example name with an Example Object (see [below](#using-examples-to-generate-a-mock-response)) +3. `schema`: JSON schema for the expected response body (see [below](#using-schema-to-generate-a-mock-response)) + +Note: +- `example` and `examples` are mutually exclusive within the OpenAPI Document for a field in the `responses` object: the developer cannot provide both for the same object. +- The `content-type` (e.g. `application/json`, `text/plain`), per OpenAPI Specification, must be declared for each `example` or `examples` in the API description. + +Let's see how to use each method: + + +### Using `example` to generate a mock response + +In the following extract from an OpenAPI description, a single `example` has been declared for a request to `GET /get` - the API developer indicates that such a call could return `HTTP 200` and the body value `Response body example` in plain text format. + +```json {hl_lines=["9-11"],linenos=true, linenostart=1} +{ + "paths": { + "/get": { + "get": { + "operationId": "getget", + "responses": { + "200": { + "content": { + "text/plain": { + "example": "Response body example" + } + }, + "description": "200 OK response for /get with a plain text" + } + } + } + } + } +} +``` + +### Using `examples` to generate a mock response + +In this extract, the API developer also indicates that a call to `GET /get` could return `HTTP 200` but here provides two example body values `Response body from first-example` and `Response body from second-example`, again in plain text format. 
+ +``` json {hl_lines=["9-18"],linenos=true, linenostart=1} +{ + "paths": { + "/get": { + "get": { + "operationId": "getget", + "responses": { + "200": { + "content": { + "text/plain": { + "examples": { + "first-example": { + "value": "Response body from first-example" + }, + "second-example": { + "value": "Response body from second-example" + } + } + } + }, + "description": "This is a mock response example with 200OK" + } + } + } + } + } +} +``` + +The `exampleNames` for these two values have been configured as `first-example` and `second-example` and can be used to [invoke the desired response](#multiple-mock-responses-for-a-single-endpoint) from a mocked endpoint. + +### Using `schema` to generate a mock response + +If there is no `example` or `examples` defined for an endpoint, Tyk will try to find a `schema` for the response. If there is a schema, it will be used to generate a mock response. Tyk can extract values from referenced or nested schema objects when creating the mock response. + +### Response headers schema +Response headers do not have standalone `example` or `examples` attributes, however, they can have a `schema` - the Mock Response middleware will include these in the mock response if provided in the OpenAPI description. + +The schema properties may have an `example` field, in which case they will be used to build a mock response. 
If there is no `example` value in the schema then default values are used to build a response as follows: +- `string` > `"string"` +- `integer` > `0` +- `boolean` > `true` + +For example, below is a partial OpenAPI description, that defines a schema for the `GET /get` endpoint + +```json {hl_lines=["10-13", "18-33"],linenos=true, linenostart=1} +{ + "paths": { + "/get": { + "get": { + "operationId": "getget", + "responses": { + "200": { + "headers": { + "X-Status": { + "schema": { + "type": "string", + "example": "status-example" + } + } + }, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "lastName": { + "example": "Lastname-placeholder", + "type": "string" + }, + "firstname": { + "type": "string" + }, + "id": { + "type": "integer" + } + } + } + } + }, + "description": "This is a mock response example with 200OK" + } + } + } + } + } +} +``` + +Tyk Gateway could use the above to generate the following mock response: + +```http +HTTP/1.1 200 OK +X-Status: status-example +Content-Type: application/json + +{ + "lastName": "Lastname-placeholder", + "firstname": "string", + "id": 0 +} +``` +Notice that in the mock response above, `firstname` has the value `string` since there was no example for it in the OpenAP document so Tyk used the word `string` as the value for this field. + + +## Using Tyk OAS + + +This tutorial is for Tyk OAS API definition users. If you're using the legacy Tyk Classic APIs, please refer to the [Tyk Classic Mock Response tutorial](#mock-response-using-classic). + +The [Mock Response](/api-management/traffic-transformation/mock-response) middleware allows you to configure Tyk to return a response for an API endpoint without requiring an upstream service. + +When working with Tyk OAS APIs, this middleware is executed at the **end** of the request processing chain immediately prior to the upstream proxy stage. 
Thus, any other request processing middleware - including authentication - will be run before the request reaches the mock response. + +The middleware is defined in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). To create this definition, you can use the Tyk Dashboard API or the API Designer in the Tyk Dashboard UI. + +To configure or create a Mock Response middleware you have two modes, manual and automatic. Following please find detailed guidance for each mode. + +### Manual configuration + +You can configure a mock response directly in the API definition, in the middleware object under the Tyk extension section, `x-tyk-api-gateway`. Once added, you need to update or import it to Tyk Dashboard using Tyk Dashboard API or via Tyk Dashboard UI. This is useful when you have already tested your API in dev environments and now you need to deploy it to staging or production in a declarative manner. + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. + +The mock response middleware (`mockResponse`) can be added to the `x-tyk-api-gateway.middleware.operations` section (Tyk OAS Extension) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +For basic operation, the `mockResponse` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `code`: the HTTP status code to be provided with the response (this defaults to `200` if not set) +- `body`: the payload to be returned as the body of the response +- `headers`: the headers to inject with the response + +Please remember that this API definition needs to be updated in Tyk Dashboard. 
In the Dashboard UI it might be trivial but if you are doing it declaratively, you need to use the Tyk Dashboard API endpoint to update an existing API (PUT) or import as a new API (POST). Once updated, Tyk Gateway/s will return mock responses to all valid requests to the endpoint, depending on the [content of the request](#multiple-mock-responses-for-a-single-endpoint). + +In the following example, we configure a mock response middleware for requests to the `GET /example-mock-response1/anything` endpoint: + +```json {hl_lines=["39-49"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-mock-response1", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "200OK for /anything using anythingget" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-mock-response1", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-mock-response1/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "mockResponse": { + "enabled": true, + "code": 200, + "body": "This is the mock response body", + "headers": [ + { + "name": "X-Mock-Example", + "value": "mock-header-value" + } + ] + } + } + } + } + } +} +``` + +Once this API definition is updated in Tyk Dashboard, a call to `GET /example-mock-response1/anything` would return: + +```bash +HTTP/1.1 200 OK +X-Mock-Example: mock-header-value +Content-Type: text/plain; charset=utf-8 + +This is the mock response body +``` + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the mock response middleware. 
+ +### Automatic configuration inferred from your OpenAPI Document + +Tyk will parse the [examples and schema](/api-management/traffic-transformation/mock-response#mock-responses-using-openapi-metadata) in the OpenAPI document and use them to generate mock responses automatically. + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards are so they are human-readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The mock response middleware (`mockResponse`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +For basic operation, the `mockResponse` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `fromOASExamples`: an object used to instruct Tyk Gateway to return a response from the OpenAPI description + +The `fromOASExamples` object has the following configuration: +- `enabled`: enable the automatic configuration of mock response +- `code`: [optional] identifies which HTTP status code to be provided with the response (defaults to `200` if not set) +- `contentType`: [optional] identifies which response body type to use (defaults to `application/json` if not set) +- `exampleName`: [optional] the sample response to be returned from an `examples` list + +The three optional fields (`code`, `contentType`, `exampleName`) are used to identify which sample response should be returned by the mock if multiple sample responses are declared in the OpenAPI description. 
+ +In the following example, the OpenAPI description declares three possible responses: two for HTTP 200 and one for HTTP 300. We have configured the Mock Response middleware to return the value defined for HTTP 200 (code) with `exampleName` set to "second-example". The JSON below shows the updated Tyk OAS API definition, after Tyk has generated and added the mock response middleware: + +```json {hl_lines=["15-24", "29-33", "59-67"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-mock-response2", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "content": { + "text/plain": { + "examples": { + "first-example": { + "value": "My favorite is pasta" + }, + "second-example": { + "value": "My second favorite is pizza" + } + } + } + }, + "description": "" + }, + "300": { + "content": { + "text/plain": { + "example": "There's too much choice" + } + }, + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-mock-response2", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-mock-response2/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "mockResponse": { + "enabled": true, + "fromOASExamples": { + "enabled": true, + "code": 200, + "contentType": "text/plain", + "exampleName": "second-example" + } + } + } + } + } + } +} +``` +Once this API definition is updated in Tyk Dashboard, a call to `GET /example-mock-response2/anything` would return: + +```bash +HTTP/1.1 200 OK +Content-Type: text/plain + +"My second favorite is pizza" +``` + +If you add `"code":300` in the `fromOASExamples` object, a call to `GET /example-mock-response2/anything` would instead respond as follows: + +```bash +HTTP/1.1 300 Multiple Choices +Content-Type: text/plain + +"There's too much choice" +``` + 
+ + +If multiple `examples` are defined in the OpenAPI description but no default `exampleName` is set in the middleware configuration `fromOASExamples` Tyk will select randomly from the multiple `examples`. Yes, that means the response may change with every request. You can [control which response](#multiple-mock-responses-for-a-single-endpoint) will be returned using special headers in the request. + + + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the mock response middleware. + +### Multiple mock responses for a single endpoint + +When the mock response middleware in your Tyk OAS API is configured to return responses from the OpenAPI description within the API definition, you can invoke a specific response, overriding the defaults configured in the middleware, by providing specific headers in your request. + +To invoke a non-default response from a mocked endpoint, you must add *one or more* special headers to the request: +- `Accept`: This standard HTTP header will override the response content type (e.g. `application/json`, `text/plain`) +- `X-Tyk-Accept-Example-Code`: This will select the HTTP response code for which to return the example response (e.g. `400`) +- `X-Tyk-Accept-Example-Name`: This identifies which example to select from an `examples` list + +If an example response can’t be found for the configured `code`, `contentType` or `exampleName`, an HTTP 404 error will be returned to inform the client that there is no declared example for that configuration. + +In the example below, the OpenAPI document declares two possible responses: one for HTTP 200 and one for HTTP 300. We have configured the Mock Response middleware to return the value defined for HTTP 200 for which the body (content) is in JSON format and a custom header `X-Status` which will take the default value of `true`. 
+```json {hl_lines=["15-19", "22-39", "45-50", "53-55", "82-89"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-mock-response3", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "headers": { + "X-Status": { + "schema": { + "type": "boolean" + } + } + }, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "lastName": { + "example": "Bar", + "type": "string" + }, + "name": { + "example": "Foo", + "type": "string" + }, + "id": { + "type": "integer" + } + } + } + } + }, + "description": "" + }, + "300": { + "headers": { + "X-Status": { + "schema": { + "type": "boolean", + "example": false + } + } + }, + "content": { + "text/plain": { + "example": "Baz" + } + }, + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-mock-response3", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-mock-response3/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "mockResponse": { + "enabled": true, + "fromOASExamples": { + "enabled": true, + "code": 200, + "contentType": "application/json" + } + } + } + } + } + } +} +``` + +You can trigger the mock response for HTTP 300 by adding the following headers to your request: +- `X-Tyk-Accept-Example-Code`: 300 +- `Accept`: text/plain + +This would return a plain text body and the `X-Status` header set to `false`. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the mock response middleware. 
+ +### Configuring mock response using Tyk Dashboard UI + +Adding a mock response to your API endpoints is easy when using the API Designer in the Tyk Dashboard UI, simply follow the steps appropriate to the configuration method you wish to use: +- [manual configuration](#manual-configuration) of the middleware config +- [automatic configuration](#automatic-configuration) from the OpenAPI description + +#### Manual configuration + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Mock Response middleware** + + Select **ADD MIDDLEWARE** and choose **Mock Response** middleware from the *Add Middleware* screen. + + Adding the Mock Response middleware + +3. **Configure the middleware** + + Select **Manually configure mock response** + + Mock Response middleware added to endpoint - select the configuration method you require + + This takes you to the middleware configuration screen where you can: + - choose the HTTP status code that you want Tyk Gateway to return + - select the content type + - add a description for your mock response + - define headers to be provided with the response + - define the body that will be returned in the response (note that this must be defined as a JSON schema) + + Configuring the mock response + + Select **UPDATE MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +#### Automatic configuration + +1. 
**Import an OpenAPI Document containing sample responses or schema** + + Import your OpenAPI Document (from file, URL or by pasting the JSON into the text editor) configure the **upstream URL** and **listen path**, and select **Auto-generate middleware to deliver mock responses**. + + Selecting this option will cause Tyk Dashboard to check for sample responses or schema in the OpenAPI description and will automatically add the Mock Response middleware for any endpoints that have suitable data. + + Configuring the OpenAPI document import to create Mock Responses + +2. **Edit the Mock Response middleware** + + Select **EDIT** and then the **Mock Response** middleware from the **Endpoints** tab. This will take you to the Edit Middleware screen. Note that *Use mock response from Open API Specification* has been selected. + + Editing the Mock Response middleware + +3. **Configure the middleware** + + Tyk Dashboard will automatically select a valid HTTP response code from the drop-down. When you select a valid `content-type` for which a mock response is configured in the OpenAPI specification, the API Designer will display the associated response. + + Mock Response middleware automatically configured from OpenAPI description + + Here you can edit the mock response: + - modify, add or delete Response Body examples (note that this must follow the selected `content-type`) + - choose a default Response Body example that will be provided (unless [overridden in the request](#multiple-mock-responses-for-a-single-endpoint)) + - add a description for your mock response + - define headers to be provided with the response (note that these must be defined as a JSON schema) + - add a schema + + You can create and edit mock responses for multiple HTTP status codes by choosing a different status code from the drop-down. + + Select **UPDATE MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. 
+ + + + + Modifying the automatically configured Mock Response middleware will update the OpenAPI description part of your Tyk OAS API definition, as the detail of the mock response is not set in the `x-tyk-api-gateway` extension but is automatically generated in response to the particular request received to the endpoint. + + + +## Using Classic + + +The [Mock Response](/api-management/traffic-transformation/mock-response) middleware allows you to configure Tyk to return a response for an API endpoint without requiring an upstream service. This can be useful when creating a new API or making a development API available to an external team. + +When working with Tyk Classic APIs, this middleware is executed at the start of the request processing chain. Thus an endpoint with the mock response middleware will not be authenticated, will not process other middleware configured for the API (neither API nor endpoint level) and will have no analytics created. It will simply return the configured response for any request made to that endpoint. + +The middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API, the API Designer or in [Tyk Operator](#tyk-operator). + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#mock-response-using-tyk-oas) page. + +### API Definition + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +To enable mock response, you must first add the endpoint to a list - one of [allow list](/api-management/traffic-transformation/allow-list), [block list](/api-management/traffic-transformation/block-list) or [ignore authentication](/api-management/traffic-transformation/ignore-authentication). This will add a new object to the `extended_paths` section of your API definition - `white_list`, `black_list` or `ignored`. The mock response can then be configured within the `method_actions` element within the new object. 
+ +The `white_list`, `black_list` and `ignored` objects all have the same structure and configuration as follows: + +- `path`: the endpoint path +- `method`: this should be blank +- `ignore_case`: if set to `true` then the path matching will be case insensitive +- `method_actions`: the configuration of the mock response + +The `method_actions` object should be configured as follows, with an entry created for each method on the path for which you wish to configure the mock response: + +- `action`: this should be set to `reply` +- `code`: the HTTP status code to be provided with the response +- `headers`: the headers to inject with the response +- `data`: the payload to be returned as the body of the response + +For example: + +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "white_list": [ + { + "disabled": false, + "path": "/anything", + "method": "", + "ignore_case": false, + "method_actions": { + "GET": { + "action": "reply", + "code": 200, + "data": "This is the mock response body", + "headers": { + "X-Example-Header": "foobar" + } + } + } + } + ] + } +} +``` + +In this example the mock response middleware has been configured for requests to the `GET /anything` endpoint. The [allow list](/api-management/traffic-transformation/allow-list) middleware has been enabled for this endpoint and is case sensitive, so calls to `GET /Anything` will not return the mock response. + +A call to `GET /anything` would return: + +``` +HTTP/1.1 200 OK +X-Example-Header: foobar +Date: Wed, 31 Jan 2024 16:21:05 GMT +Content-Length: 30 +Content-Type: text/plain; charset=utf-8 + +This is the mock response body +``` + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the Mock Response middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and configure a list plugin** + + For the mock response to be enabled, the endpoint must also be in a list. 
We recommend adding the path to an allow list by [selecting](/api-management/traffic-transformation/allow-list#api-definition) the **Allow List** plugin. + +2. **Add the mock response plugin** + + Now select the **Mock response** plugin. + + Selecting the mock response middleware for a Tyk Classic API + +3. **Configure the middleware** + + Once you have selected the Mock response middleware for the endpoint, you can configure the HTTP status code, headers and body to be included in the response. Remember to click **ADD**, to add each header to the response. + + Configuring the mock response middleware for a Tyk Classic API + +4. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + + + + + For the mock response to be enabled, the endpoint must also be in a list. We recommend adding the path to an [allow list](/api-management/traffic-transformation/allow-list#allow-list-using-tyk-oas). If this isn't done, then the mock will not be saved when you save your API in the designer. + + + +### Tyk Operator + +The process of configuring a mock response is similar to that defined in the configuring the middleware in Tyk Classic API definition section. + +To configure a mock response, you must first add a mock response configuration object to the `extended_paths` section, e.g. one of allow list (`white_list`), block list (`black_list`) or ignore authentication (`ignore`). The mock response configuration object has the following properties: + +- path: the path of the endpoint, e.g `/foo`. +- ignore_case: when set to true the path matching is case insensitive. +- method_actions: a configuration object that allows the mock response to be configured for a given method, including the response body, response headers and status code. This object should also contain an `action` field with a value set to `reply`. 
+ +In the example below we can see that a mock response is configured to ignore authentication (`ignore`) for the `GET /foo` endpoint. When a request is made to the endpoint then authentication will be ignored and a mock response is returned with status code `200` and a response body payload of `{"foo": "bar"}`. The middleware has been configured to be case sensitive, so calls to `GET /Foo` will not ignore authentication. + +```yaml {linenos=true, linenostart=1, hl_lines=["26-34"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + protocol: http + active: true + use_keyless: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + ignored: + - ignore_case: false + method_actions: + GET: + action: "reply" + code: 200 + data: "{\"foo\": \"bar\"}" + headers: {} + path: /foo +``` + + diff --git a/api-management/traffic-transformation/request-body.mdx b/api-management/traffic-transformation/request-body.mdx new file mode 100644 index 000000000..bc3317c1f --- /dev/null +++ b/api-management/traffic-transformation/request-body.mdx @@ -0,0 +1,395 @@ +--- +title: "Request Body" +description: "How to configure Request Body traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Request Body" +sidebarTitle: "Request Body " +--- + +## Overview + +Tyk enables you to modify the payload of API requests before they are proxied to the upstream. This makes it easy to transform between payload data formats or to expose legacy APIs using newer schema models without having to change any client implementations. This middleware is only applicable to HTTP methods that can support a request body (i.e. PUT, POST or PATCH). 
+ +With the body transform middleware you can modify XML or JSON formatted payloads to ensure that the request contains the information required by your upstream service. You can enrich the request by adding contextual data that is held by Tyk but not included in the original request from the client. + +This middleware changes only the payload and not the headers. You can, however, combine this with the [Request Header Transform](/api-management/traffic-transformation/request-headers) middleware to apply more complex transformation to requests. + +There is a closely related [Response Body Transform](/api-management/traffic-transformation/response-body) middleware that provides the same functionality on the response from the upstream, prior to it being returned to the client. + +### Use Cases + +#### Maintaining compatibility with legacy clients + +Sometimes you might have a legacy API and need to migrate the transactions to a new upstream service but do not want to upgrade all the existing clients to the newer upstream API. Using request body transformation, you can convert the incoming legacy XML or JSON request structure into a newer, cleaner JSON format that your upstream services expect. + +#### Shaping requests received from different devices + +You can detect device types via headers or context variables and transform the request payload to optimize it for that particular device. For example, you might send extra metadata to the upstream for mobile apps. + +#### SOAP to REST translation + +A common use of the request body transform middleware is to surface a legacy SOAP service with a REST API. Full details of how to perform this conversion using Tyk are provided [here](/advanced-configuration/transform-traffic/soap-rest). + +### Working + +Tyk's body transform middleware uses the [Go template language](https://golang.org/pkg/text/template/) to parse and modify the provided input. 
We have bundled the [Sprig Library (v3)](http://masterminds.github.io/sprig/) which provides over 70 pre-written functions for transformations to assist the creation of powerful Go templates to transform your API requests. + +The Go template can be defined within the API Definition or can be read from a file that is accessible to Tyk, for example alongside your [error templates](/api-management/gateway-events#error-templates). + +We have provided more detail, links to reference material and some examples of the use of Go templating [here](/api-management/traffic-transformation/go-templates). + + + +Tyk evaluates templates stored in files on startup, so if you make changes to a template you must remember to restart the gateway. + + + +#### Supported request body formats + +The body transformation middleware can modify request payloads in the following formats: +- JSON +- XML + +When working with JSON format data, the middleware will unmarshal the data into a data structure, and then make that data available to the template in dot-notation. 
+ +#### Data accessible to the middleware + +The middleware has direct access to the request body and also to dynamic data as follows: + - [context variables](/api-management/traffic-transformation/request-context-variables), extracted from the request at the start of the middleware chain, can be injected into the template using the `._tyk_context.KEYNAME` namespace + - [session metadata](/api-management/policies#what-is-a-session-metadata), from the Tyk Session Object linked to the request, can be injected into the template using the `._tyk_meta.KEYNAME` namespace + - inbound form or query data can be accessed through the `._tyk_context.request_data` namespace where it will be available as a `key:[]value` map + - values from [key-value (KV) storage](/tyk-configuration-reference/kv-store#transformation-middleware) can be injected into the template using the notation appropriate to the location of the KV store + +The request body transform middleware can iterate through list indices in dynamic data so, for example, calling `{{ index ._tyk_context.request_data.variablename 0 }}` in a template will expose the first entry in the `request_data.variablename` key/value array. + + + +As explained in the [documentation](https://pkg.go.dev/text/template), templates are executed by applying them to a data structure. The template receives the decoded JSON or XML of the request body. If session variables or meta data are enabled, additional fields will be provided: `_tyk_context` and `_tyk_meta` respectively. + + + +#### Automatic XML <-> JSON Transformation + +A very common transformation that is applied in the API Gateway is to convert between XML and JSON formatted body content. 
+ +The Request Body Transform supports two helper functions that you can use in your Go templates to facilitate this: + - `jsonMarshal` performs JSON style character escaping on an XML field and, for complex objects, serialises them to a JSON string ([example](/api-management/traffic-transformation/go-templates#xml-to-json-conversion-using-jsonmarshal)) + - `xmlMarshal` performs the equivalent conversion from JSON to XML ([example](/api-management/traffic-transformation/go-templates#json-to-xml-conversion-using-xmlmarshal)) + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Request Body Transform middleware summary + - The Request Body Transform middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Request Body Transform middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. + - Request Body Transform can access both [session metadata](/api-management/policies#what-is-a-session-metadata) and [request context variables](/api-management/traffic-transformation/request-context-variables). */} + +## Using Tyk OAS + + +The [request body transform](/api-management/traffic-transformation/request-body) middleware provides a way to modify the payload of API requests before they are proxied to the upstream. + +The middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#request-body-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. 
+ +The request body transformation middleware (`transformRequestBody`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +The `transformRequestBody` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `format`: the format of input data the parser should expect (either `xml` or `json`) +- `body`: [see note] this is a `base64` encoded representation of your template +- `path`: [see note] this is the path to the text file containing the template + + + + + You should configure only one of `body` or `path` to indicate whether you are embedding the template within the middleware or storing it in a text file. The middleware will automatically select the correct source based on which of these fields you complete. If both are provided, then `body` will take precedence and `path` will be ignored. 
+ + + +For example: +```json {hl_lines=["39-43"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-request-body-transform", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "put": { + "operationId": "anythingput", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-request-body-transform", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-request-body-transform/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingput": { + "transformRequestBody": { + "enabled": true, + "format": "json", + "body": "ewogICJ2YWx1ZTEiOiAie3sudmFsdWUyfX0iLAogICJ2YWx1ZTIiOiAie3sudmFsdWUxfX0iLAogICJyZXEtaGVhZGVyIjogInt7Ll90eWtfY29udGV4dC5oZWFkZXJzX1hfSGVhZGVyfX0iLAogICJyZXEtcGFyYW0iOiAie3suX3R5a19jb250ZXh0LnJlcXVlc3RfZGF0YS5wYXJhbX19Igp9" + } + } + } + } + } +} +``` + +In this example the request body transform middleware has been configured for requests to the `PUT /anything` endpoint. The `body` contains a base64 encoded Go template (which you can check by pasting the value into a service such as [base64decode.org](https://www.base64decode.org)). + +Decoded, this template is: +```json +{ + "value1": "{{.value2}}", + "value2": "{{.value1}}", + "req-header": "{{._tyk_context.headers_X_Header}}", + "req-param": "{{._tyk_context.request_data.param}}" +} +``` + +So if you make a request to `PUT /anything?param=foo` as follows: +```bash +PUT /anything?param=foo +HTTP/1.1 +Host: my-gateway.host +X-Header: bar + +{ + "value1": "world", + "value2": "hello" +} +``` + +You will receive a response from the upstream with this payload: +```json +{ + "req-header": "bar", + "req-param": "[foo]", + "value1": "hello", + "value2": "world" +} +``` + +The `/anything` endpoint returns the details of the request that was received by httpbin.org. 
You can see that Tyk has swapped `value1` and `value2` and embedded the `X-Header` header and `param` query values into the body of the request. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the request body transform middleware. + + + +If using a template in a file (i.e. you configure `path` in the `transformRequestBody` object), remember that Tyk will load and evaluate the template when the Gateway starts up. If you modify the template, you will need to restart Tyk in order for the changes to take effect. + + + +### API Designer + +Adding Request Body Transformation to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow the following steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Request Body Transform middleware** + + Select **ADD MIDDLEWARE** and choose the **Request Body Transform** middleware from the *Add Middleware* screen. + + Adding the Request Body Transform middleware + +3. **Configure the middleware** + + Now you can select the request body format (JSON or XML) and add either a path to the file containing the template, or directly enter the transformation template in the text box. + + Configuring the Request Body Transform middleware + + The **Test with data** control will allow you to test your body transformation function by providing an example request body and generating the output from the transform. 
It is not possible to configure headers, other request parameters, context or session metadata to this template test so if you are using these data sources in your transform it will not provide a complete output, for example: + + Testing the Request Body Transform + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [request body transform](/api-management/traffic-transformation/request-body) middleware provides a way to modify the payload of API requests before they are proxied to the upstream. + +This middleware is configured in the Tyk Classic API Definition at the endpoint level. You can do this via the Tyk Dashboard API or in the API Designer. + +If you want to use dynamic data from context variables, you must [enable](/api-management/traffic-transformation/request-context-variables#enabling-context-variables-for-use-with-tyk-classic-apis) context variables for the API to be able to access them from the request body transform middleware. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#request-body-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [Configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To enable the middleware you must add a new `transform` object to the `extended_paths` section of your API definition. 
+ +The `transform` object has the following configuration: +- `path`: the path to match on +- `method`: the method to match on +- `template_data`: details of the Go template to be applied for the transformation of the request body + +The Go template is described in the `template_data` object by the following fields: +- `input_type`: the format of input data the parser should expect (either `xml` or `json`) +- `enable_session`: set this to `true` to make session metadata available to the transform template +- `template_mode`: instructs the middleware to look for the template either in a `file` or in a base64 encoded `blob`; the actual file location (or base64 encoded template) is provided in `template_source` +- `template_source`: if `template_mode` is set to `file`, this will be the path to the text file containing the template; if `template_mode` is set to `blob`, this will be a `base64` encoded representation of your template + +For example: +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "transform": [ + { + "path": "/anything", + "method": "POST", + "template_data": { + "template_mode": "file", + "template_source": "./templates/transform_test.tmpl", + "input_type": "json", + "enable_session": true + } + } + ] + } +} +``` + +In this example, the Request Body Transform middleware is directed to use the template located in the `file` at location `./templates/transform_test.tmpl`. The input (pre-transformation) request payload will be `json` format and session metadata will be available for use in the transformation. + + + +Tyk will load and evaluate the template file when the Gateway starts up. If you modify the template, you will need to restart Tyk in order for the changes to take effect. + + + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the request body transform middleware for your Tyk Classic API by following these steps. + +1. 
**Add an endpoint for the path and select the plugin** + +From the **Endpoint Designer** add an endpoint that matches the path for which you want to perform the transformation. Select the **Body Transforms** plugin. + +Endpoint designer + +2. **Configure the middleware** + +Ensure that you have selected the `REQUEST` tab, then select your input type, and then add the template you would like to use to the **Template** input box. + +Setting the body request transform + +3. **Test the Transform** + +If sample input data is available, you can use the Input box to add it, and then test it using the **Test** button. You will see the effect of the template on the sample input displayed in the Output box. + +Testing the body transform function + +4. **Save the API** + +Use the *save* or *create* buttons to save the changes and activate the Request Body Transform middleware. + +### Tyk Operator + +The process for configuring a request body transform is similar to that defined in section configuring the middleware in the Tyk Classic API Definition. Tyk Operator allows you to configure a request body transform by adding a `transform` object to the `extended_paths` section of your API definition. + +In the example below the Request Body middleware (`transform`) has been configured for `HTTP POST` requests to the `/anything` endpoint. The Request Body Transform middleware is directed to use the template located in the blob included in the `template_source` field. The input (pre-transformation) request payload will be json format and session metadata will be available for use in the transformation. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["32-40"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-transform +spec: + name: httpbin-transform + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-transform + strip_listen_path: true + response_processors: + - name: response_body_transform + - name: header_injector + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + transform: + - method: POST + path: /anything + template_data: + enable_session: false + input_type: json + template_mode: blob + # base64 encoded template + template_source: eyJiYXIiOiAie3suZm9vfX0ifQ== + transform_headers: + - delete_headers: + - "remove_this" + add_headers: + foo: bar + path: /anything + method: POST + transform_response: + - method: GET + path: /xml + template_data: + enable_session: false + input_type: xml + template_mode: blob + # base64 encoded template + template_source: e3sgLiB8IGpzb25NYXJzaGFsIH19 + transform_response_headers: + - method: GET + path: /xml + add_headers: + Content-Type: "application/json" + act_on: false + delete_headers: [] +``` + diff --git a/api-management/traffic-transformation/request-context-variables.mdx b/api-management/traffic-transformation/request-context-variables.mdx new file mode 100644 index 000000000..e57ac503e --- /dev/null +++ b/api-management/traffic-transformation/request-context-variables.mdx @@ -0,0 +1,110 @@ +--- +title: "Request Context Variables" +description: "How to configure Request Context Variables traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Request Context Variables" +sidebarTitle: "Request Context Variables" +--- + +Context variables are extracted from the request at the start of the middleware chain. 
These values can be very useful for later transformation of request data, for example, in converting a form POST request into a JSON PUT request or to capture an IP address as a header. + + + +When using Tyk Classic APIs, you must [enable](#enabling-context-variables-for-use-with-tyk-classic-apis) context variables for the API to be able to access them. When using Tyk OAS APIs, the context variables are always available to the context-aware middleware. + + + + +## Available context variables +* `request_data`: If the inbound request contained any query data or form data, it will be available in this object. For the header injector Tyk will format this data as `key:value1,value2,valueN;key:value1,value2` etc. +* `path_parts`: The components of the path, split on `/`. These values should be in the format of a comma delimited list. +* `token`: The inbound raw token (if bearer tokens are being used) of this user. +* `path`: The path that is being requested. +* `remote_addr`: The IP address of the connecting client. +* `request_id`: Allows the injection of a request correlation ID (for example X-Request-ID) +* `jwt_claims_CLAIMNAME` - If JWT tokens are being used, then each claim in the JWT is available in this format to the context processor. `CLAIMNAME` is case sensitive so use the exact claim. +* `cookies_COOKIENAME` - If there are cookies, then each cookie is available in context processor in this format. `COOKIENAME` is case sensitive so use the exact cookie name and replace any `-` in the cookie name with `_`. +* `headers_HEADERNAME` - Headers are also exposed to the context processor. You can access any header in the request using the following format: the **first letter** in each word of an incoming header is converted to Capital Case. This is due to the way GoLang handles header parsing. You also need to replace any `-` in the `HEADERNAME` name with `_`.
+For example, to get the value stored in `test-header`, the syntax would be `$tyk_context.headers_Test_Header`. + + +## Middleware that can use context variables: +Context variables are exposed in three middleware plugins but are accessed differently depending on the caller as follows: + +1. URL Rewriter - Syntax is `$tyk_context.CONTEXTVARIABLES`. See [URL Rewriting](/transform-traffic/url-rewriting#url-rewrite-middleware) for more details. +2. Modify Headers - Syntax is `$tyk_context.CONTEXTVARIABLES`. See [Request Headers](/api-management/traffic-transformation/request-headers) for more details. +3. Body Transforms - Syntax is `{{ ._tyk_context.CONTEXTVARIABLES }}`. See [Body Transforms](/api-management/traffic-transformation/request-body) for more details. + + + + + The Body Transform can fully iterate through list indices within context data so, for example, calling `{{ index ._tyk_context.path_parts 0 }}` in the Go Template in a Body Transform will expose the first entry in the `path_parts` list. + + URL Rewriter and Header Transform middleware cannot iterate through list indices. 
+ + + + +## Example use of context variables + +### Examples of the syntax to use with all the available context variables: +``` +"x-remote-addr": "$tyk_context.remote_addr", +"x-token": "$tyk_context.token", +"x-jwt-sub": "$tyk_context.jwt_claims_sub", +"x-part-path": "$tyk_context.path_parts", +"x-jwt-pol": "$tyk_context.jwt_claims_pol", +"x-cookie": "$tyk_context.cookies_Cookie_Context_Var", +"x-cookie-sensitive": "$tyk_context.cookies_Cookie_Case_sensitive", +"x-my-header": "$tyk_context.headers_My_Header", +"x-path": "$tyk_context.path", +"x-request-data": "$tyk_context.request_data", +"x-req-id": "$tyk_context.request_id" +``` +Example of the syntax in the UI + +### The context variable values in the response: +``` +"My-Header": "this-is-my-header", +"User-Agent": "PostmanRuntime/7.4.0", +"X-Cookie": "this-is-my-cookie", +"X-Cookie-Sensitive": "case-sensitive", +"X-Jwt-Pol": "5bca6a739afe6a00017eb267", +"X-Jwt-Sub": "john.doe@test.com", +"X-My-Header": "this-is-my-header", +"X-Part-Path": "context-var-example,anything", +"X-Path": "/context-var-example/anything", +"X-Remote-Addr": "127.0.0.1", +"X-Req-Id": "e3e99350-b87a-4d7d-a75f-58c1f89b2bf3", +"X-Request-Data": "key1:val1;key2:val2", +"X-Token": "5bb2c2abfb6add0001d65f699dd51f52658ce2d3944d3d6cb69f07a2" +``` + +## Enabling Context Variables for use with Tyk Classic APIs +1. In your Tyk Dashboard, select `APIs` from the `System Management` menu +2. Open the API you want to add Context Variables to +3. Click the `Advanced Options` tab and then select the `Enable context variables` option + +Context Variables + +If not using a Tyk Dashboard, add the field `enable_context_vars` to your API definition file at root level and set it to `true`. + +If you are using Tyk Operator, set the field `spec.enable_context_vars` to `true`. 
+ +The example API Definition below enables context variables: + +```yaml {linenos=true, linenostart=1, hl_lines=["10-10"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + enable_context_vars: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` diff --git a/api-management/traffic-transformation/request-headers.mdx b/api-management/traffic-transformation/request-headers.mdx new file mode 100644 index 000000000..d62a98446 --- /dev/null +++ b/api-management/traffic-transformation/request-headers.mdx @@ -0,0 +1,554 @@ +--- +title: "Request Headers" +description: "How to configure Request Headers traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Request Headers" +sidebarTitle: "Request Headers " +--- + +## Overview + +Tyk allows you to modify the headers of incoming requests to your API endpoints before they are passed to your upstream service. + +There are two options for this: +- API-level modification that is applied to all requests to the API +- endpoint-level modification that is applied only to requests to a specific endpoint + +With the header transform middleware you can append or delete any number of headers to ensure that the request contains the information required by your upstream service. You can enrich the request by adding contextual data that is held by Tyk but not included in the original request from the client. + +This middleware changes only the headers and not the method or payload. You can, however, combine this with the [Request Method Transform](/api-management/traffic-transformation/request-method) and [Request Body Transform](/api-management/traffic-transformation/request-body) to apply more complex transformation to requests. 
+ +There are related [Response Header Transform](/api-management/traffic-transformation/response-headers) middleware (at API-level and endpoint-level) that provide the same functionality on the response from your upstream, prior to it being returned to the client. + +### Use Cases + +#### Adding Custom Headers + +A common use of this feature is to add custom headers to requests, such as adding a secure header to all upstream requests (to verify that traffic is coming from the gateway), or adding a timestamp for tracking purposes. + +#### Modifying Headers for Compatibility + +You could use the request header transform middleware to modify headers for compatibility with a downstream system, such as changing the Content-Type header from "application/json" to "application/xml" for an API that only accepts XML requests while using the [Request Body Transform](/api-management/traffic-transformation/request-body) to transform the payload. + +#### Prefixing or Suffixing Headers + +Upstream systems or corporate policies might mandate that a prefix or suffix is added to header names, such as adding a "Bearer" prefix to all Authorization headers for easier identification internally, without modifying the externally published API consumed by the client applications. + +#### Adding multi-user access to a service + +You can add multi-user access to an upstream API that has only a single authentication key, without modifying the upstream service or adding clunky authentication methods to it to support new users. + +### Working + +The request header transform can be applied per-API or per-endpoint; each has a separate entry in the API definition so that you can configure both API-level and endpoint-level transforms for a single API. + +The middleware is configured with a list of headers to delete from the request and a list of headers to add to the request. Each header to be added to the request is configured as a key:value pair. 
+ +The "delete header" functionality is intended to ensure that any header in the delete list is not present once the middleware completes - so if a header is not originally present in the request but is on the list to be deleted, the middleware will ignore its omission. + +The "add header" functionality will capitalize any header name provided, for example if you configure the middleware to append `x-request-id` it will be added to the request as `X-Request-Id`. + +In the request middleware chain, the API-level transform is applied before the endpoint-level transform so if both middleware are enabled, the endpoint-level transform will operate on the headers that have been added by the API-level transform (and will not receive those that have been deleted by it). + +#### Injecting dynamic data into headers + +You can enrich the request headers by injecting data from context variables or session objects into the headers. +- [context variables](/api-management/traffic-transformation/request-context-variables) are extracted from the request at the start of the middleware chain and can be injected into added headers using the `$tyk_context.` namespace +- [session metadata](/api-management/policies#what-is-a-session-metadata), from the Tyk Session Object linked to the request, can be injected into added headers using the `$tyk_meta.` namespace +- values from [key-value (KV) storage](/tyk-configuration-reference/kv-store#transformation-middleware) can be injected into added headers using the notation appropriate to the location of the KV store + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Request Header Transform middleware summary + - The Request Header Transform is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Request Header Transform can be configured at the per-endpoint or per-API level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + +## Using Tyk OAS + + +Tyk's [request header transform](/api-management/traffic-transformation/request-headers) middleware enables you to append or delete headers on requests to your API endpoints before they are passed to your upstream service. + +There are two options for this: +- API-level modification that is applied to all requests to the API +- endpoint-level modification that is applied only to requests to a specific endpoint + + + + + If both API-level and endpoint-level middleware are configured, the API-level transformation will be applied first. + + + +When working with Tyk OAS APIs the transformation is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#request-headers-using-classic) page. + +### API Definition + +The API-level and endpoint-level request header transforms are configured in different sections of the API definition, though have a common configuration. + +### API-level transform + +To append headers to, or delete headers from, all requests to your API (i.e. for all endpoints) you must add a new `transformRequestHeaders` object to the `middleware.global` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition. + +You only need to enable the middleware (set `enabled:true`) and then configure the details of headers to `add` and those to `remove`. 
+ +For example: +```json {hl_lines=["38-56"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-request-header", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/status/200": { + "get": { + "operationId": "status/200get", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-request-header", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-request-header/", + "strip": true + } + }, + "middleware": { + "global": { + "transformRequestHeaders": { + "enabled": true, + "remove": [ + "Auth_Id" + ], + "add": [ + { + "name": "X-Static", + "value": "foobar" + }, + { + "name": "X-Request-ID", + "value": "$tyk_context.request_id" + }, + { + "name": "X-User-ID", + "value": "$tyk_meta.uid" + } + ] + } + } + } + } +} +``` + +This configuration will add three new headers to each request: +- `X-Static` with the value `foobar` +- `X-Request-ID` with a dynamic value taken from the `request_id` [context variables](/api-management/traffic-transformation/request-context-variables) +- `X-User-ID` with a dynamic value taken from the `uid` field in the [session metadata](/api-management/policies#what-is-a-session-metadata) + +It will also delete one header (if present) from each request: +- `Auth_Id` + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the API-level request header transform. + +### Endpoint-level transform + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. 
These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +The request header transform middleware (`transformRequestHeaders`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +The `transformRequestHeaders` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `add`: a list of headers, in key:value pairs, to be appended to the request +- `remove`: a list of headers to be deleted from the request (if present) + +For example: +```json {hl_lines=["39-50"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-request-header", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/status/200": { + "get": { + "operationId": "status/200get", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-request-header", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-request-header/", + "strip": true + } + }, + "middleware": { + "operations": { + "status/200get": { + "transformRequestHeaders": { + "enabled": true, + "remove": [ + "X-Static" + ], + "add": [ + { + "name": "X-Secret", + "value": "the-secret-key-is-secret" + } + ] + } + } + } + } + } +} +``` + +In this example the Request Header Transform middleware has been configured for requests to the `GET /status/200` endpoint. Any request received to that endpoint will have the `X-Static` header removed and the `X-Secret` header added, with the value set to `the-secret-key-is-secret`. 
+ +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the endpoint-level request header transform. + +### Combining API-level and Endpoint-level transforms + +If the API-level transform in the previous [example](/api-management/traffic-transformation/request-headers#api-level-transform) is applied to the same API, then because the API-level transformation is performed first, the `X-Static` header will be added (by the API-level transform) and then removed (by the endpoint-level transform) such that the overall effect of the two transforms for a call to `GET /status/200` would be to add three headers: + - `X-Request-ID` + - `X-User-ID` + - `X-Secret` + +and to remove one: + - `Auth_Id` + +### API Designer + +Adding and configuring the transforms to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +### Adding an API-level transform + +From the **API Designer** on the **Settings** tab, after ensuring that you are in *edit* mode, toggle the switch to **Enable Transform request headers** in the **Middleware** section: +Tyk OAS API Designer showing API-level Request Header Transform + +Then select **NEW HEADER** as appropriate to add or remove a header from API requests. You can add or remove multiple headers by selecting **ADD HEADER** to add another to the list: +Configuring the API-level Request Header Transform in Tyk OAS API Designer + +### Adding an endpoint level transform + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. 
**Select the Request Header Transform middleware** + + Select **ADD MIDDLEWARE** and choose the **Request Header Transform** middleware from the *Add Middleware* screen. + + Adding the Request Header Transform middleware + +3. **Configure header transformation** + + Select **NEW HEADER** to configure a header to be added to or removed from the request. + + Configuring the Request Header transformation + + You can add multiple headers to either list by selecting **NEW HEADER** again. + + Adding another header to the transformation + +4. **Save the API** + + Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes. + +## Using Classic + + +Tyk's [request header transform](/api-management/traffic-transformation/request-headers) middleware enables you to append or delete headers on requests to your API endpoints before they are passed to your upstream service. + +There are two options for this: +- API-level modification that is applied to all requests to the API +- endpoint-level modification that is applied only to requests to a specific endpoint + + + + + If both API-level and endpoint-level middleware are configured, the API-level transformation will be applied first. + + + +When working with Tyk Classic APIs the transformation is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you want to use dynamic data from context variables, you must [enable](/api-management/traffic-transformation/request-context-variables#enabling-context-variables-for-use-with-tyk-classic-apis) context variables for the API to be able to access them from the request header transform middleware. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#request-headers-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the Request Header Transform in Tyk Operator](#tyk-operator) section below. 
+ +### API Definition + +The API-level and endpoint-level request header transforms have a common configuration but are configured in different sections of the API definition. + +#### API-level transform + + +To **append** headers to all requests to your API (i.e. for all endpoints) you must add a new `global_headers` object to the `versions` section of your API definition. This contains a list of key:value pairs, being the names and values of the headers to be added to requests. + +To **delete** headers from all requests to your API, you must add a new `global_headers_remove` object to the `versions` section of the API definition. This contains a list of the names of existing headers to be removed from requests. + +For example: +```json {hl_lines=["39-45"],linenos=true, linenostart=1} +{ + "version_data": { + "versions": { + "Default": { + "global_headers": { + "X-Static": "foobar", + "X-Request-ID":"$tyk_context.request_id", + "X-User-ID": "$tyk_meta.uid" + }, + "global_headers_remove": [ + "Auth_Id" + ] + } + } + }, +} +``` + +This configuration will add three new headers to each request: +- `X-Static` with the value `foobar` +- `X-Request-ID` with a dynamic value taken from the `request_id` [context variables](/api-management/traffic-transformation/request-context-variables) +- `X-User-ID` with a dynamic value taken from the `uid` field in the [session metadata](/api-management/policies#what-is-a-session-metadata) + +It will also delete one header (if present) from each request: +- `Auth_Id` + +#### Endpoint-level transform + + +To configure a transformation of the request header for a specific endpoint you must add a new `transform_headers` object to the `extended_paths` section of your API definition. 
+ +It has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `delete_headers`: A list of the headers that should be deleted from the request +- `add_headers`: A list of headers, in key:value pairs, that should be added to the request + +The `path` can contain wildcards in the form of any string bracketed by curly braces, for example `{user_id}`. These wildcards are so they are human readable and do not translate to variable names. Under the hood, a wildcard translates to the β€œmatch everything” regex of: `(.*)`. + +For example: +```json +{ + "transform_headers": [ + { + "path": "status/200", + "method": "GET", + "delete_headers": ["X-Static"], + "add_headers": {"X-Secret": "the-secret-key-is-secret"} + } + ] +} +``` + +In this example the Request Header Transform middleware has been configured for HTTP `GET` requests to the `/status/200` endpoint. Any request received to that endpoint will have the `X-Static` header removed and the `X-Secret` header added, with the value set to `the-secret-key-is-secret`. + +#### Combining API-level and Endpoint-level transforms + +If the API-level transform in the previous [example](/api-management/traffic-transformation/request-headers#api-level-transform) is applied to the same API, then because the API-level transformation is performed first, the `X-Static` header will be added (by the API-level transform) and then removed (by the endpoint-level transform) such that the overall effect of the two transforms for a call to `GET /status/200` would be to add three headers: +- `X-Request-ID` +- `X-User-ID` +- `X-Secret` + +and to remove one: +- `Auth_Id` + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the request header transform middleware for your Tyk Classic API by following these steps. + +#### API-level transform + +Configuring the API-level request header transform middleware is very simple when using the Tyk Dashboard. 
+ +In the Endpoint Designer you should select the **Global Version Settings** and ensure that you have selected the **Request Headers** tab: + +Global version settings + +Note that you must click **ADD** to add a header to the list (for appending or deletion). + +#### Endpoint-level transform + +1. **Add an endpoint for the path and select the Header Transform plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to perform the transformation. Select the **Modify Headers** plugin. + + Endpoint designer + +2. **Select the "Request" tab** + + This ensures that this will only be applied to inbound requests. + + Request tab + +3. **Declare the headers to be modified** + + Select the headers to delete and insert using the provided fields. You need to click **ADD** to ensure they are added to the list. + + Header transforms + +4. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring a request header transform is similar to that defined in section Configuring the Request Header Transform in the Tyk Classic API Definition. Tyk Operator allows you to configure a request header transform for [all endpoints of an API](#tyk-operator-api) or for a [specific API endpoint](#tyk-operator-endpoint). + +#### API-level transform + + +Request headers can be removed and inserted using the following fields within an `ApiDefinition`: + +- `global_headers`: Mapping of key values corresponding to headers to add to API requests. +- `global_headers_remove`: List containing the names of headers to remove from API requests. + +The example below shows an `ApiDefinition` custom resource that adds *foo-req* and *bar-req* headers to the request before it is sent upstream. The *foo-req* header has a value of *my-foo* and the *bar-req* header has a value of *my-bar*. Furthermore, the *hello* header is removed from the request before it is sent upstream. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["25-29"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-global-headers +spec: + name: httpbin-global-headers + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-global-headers + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + global_headers: + foo-req: my-foo + bar-req: my-bar + global_headers_remove: + - hello +``` + +#### Endpoint-level transform + + +The process of configuring a transformation of a request header for a specific endpoint is similar to that defined in section [Endpoint-level transform](#tyk-classic-endpoint). To configure a transformation of the request header for a specific endpoint you must add a new `transform_headers` object to the `extended_paths` section of your API definition. + +In the example below the Request Header Transform middleware (`transform_headers`) has been configured for HTTP `POST` requests to the `/anything` endpoint. Any request received to that endpoint will have the `remove_this` header removed and the `foo` header added, with the value set to `bar`. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["41-47"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-transform +spec: + name: httpbin-transform + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-transform + strip_listen_path: true + response_processors: + - name: response_body_transform + - name: header_injector + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + transform: + - method: POST + path: /anything + template_data: + enable_session: false + input_type: json + template_mode: blob + # base64 encoded template + template_source: eyJiYXIiOiAie3suZm9vfX0ifQ== + transform_headers: + - delete_headers: + - "remove_this" + add_headers: + foo: bar + path: /anything + method: POST + transform_response: + - method: GET + path: /xml + template_data: + enable_session: false + input_type: xml + template_mode: blob + # base64 encoded template + template_source: e3sgLiB8IGpzb25NYXJzaGFsIH19 + transform_response_headers: + - method: GET + path: /xml + add_headers: + Content-Type: "application/json" + act_on: false + delete_headers: [] +``` + diff --git a/api-management/traffic-transformation/request-method.mdx b/api-management/traffic-transformation/request-method.mdx new file mode 100644 index 000000000..1f296b29d --- /dev/null +++ b/api-management/traffic-transformation/request-method.mdx @@ -0,0 +1,243 @@ +--- +title: "Request Method" +description: "How to configure Request Method traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Request Method" +sidebarTitle: "Request Method " +--- + +## Overview + +Tyk's Request Method Transform middleware allows you to modify the HTTP method of incoming requests to an API endpoint prior to the request being proxied to the upstream service. 
You might use this to map `POST` requests from clients to upstream services that support only `PUT` and `DELETE` operations, providing a modern interface to your users. It is a simple middleware that changes only the method and not the payload or headers. You can, however, combine this with the [Request Header Transform](/api-management/traffic-transformation/request-headers) and [Request Body Transform](/api-management/traffic-transformation/request-body) to apply more complex transformation to requests.
+
+### Use Cases
+
+#### Simplifying API consumption
+
+In cases where an upstream API requires different methods (e.g. `PUT` or `DELETE`) for different functionality but you want to wrap this in a single client-facing API, you can provide a simple interface offering a single method (e.g. `POST`) and then use the method transform middleware to map requests to the correct upstream method.
+
+#### Enforcing API governance and standardization
+
+You can use the transform middleware to ensure that all requests to a service are made using the same HTTP method, regardless of the original method used by the client. This can help maintain consistency across different client applications accessing the same upstream API.
+
+#### Error Handling and Redirection
+
+You can use the method transformation middleware to handle errors and redirect requests to different endpoints, such as changing a DELETE request to a GET request when a specific resource is no longer available, allowing for graceful error handling and redirection.
+
+#### Testing and debugging
+
+Request method transformation can be useful when testing or debugging API endpoints; temporarily changing the request method can help to identify issues or test specific functionalities.
+
+### Working
+
+This is a very simple middleware that is assigned to an endpoint and configured with the HTTP method to which the request should be modified. 
The Request Method Transform middleware modifies the request method for the entire request flow, not just for the specific upstream request, so all subsequent middleware in the processing chain will use the new (transformed) method. + +
+
+{/* proposed "summary box" to be shown graphically on each middleware page
+ # Request Method Transform middleware summary
+ - The Request Method Transform is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware.
+ - The Request Method Transform is configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */}
+
+## Using Tyk OAS
+
+
+Tyk's [request method transform](/api-management/traffic-transformation/request-method) middleware is configured at the endpoint level, where it modifies the HTTP method used in the request to a configured value.
+
+When working with Tyk OAS APIs the transformation is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer.
+
+If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#request-method-using-classic) page.
+
+### API Definition
+
+The request method transform middleware (`transformRequestMethod`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards exist only to keep the paths human readable; they do not translate to variable names. Under the hood, a wildcard translates to the “match everything” regex of: `(.*)`.
+
+You only need to enable the middleware (set `enabled:true`) and then configure `toMethod` as the new HTTP method to which the request should be transformed. 
The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the method should be transformed. + +All standard HTTP methods are supported: `GET`, `PUT`, `POST`, `PATCH`, `DELETE`, `HEAD`, `OPTIONS`. + +For example: +```json {hl_lines=["39-41"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-request-method", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/status/200": { + "get": { + "operationId": "status/200get", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-request-method", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-request-method/", + "strip": true + } + }, + "middleware": { + "operations": { + "status/200get": { + "transformRequestMethod": { + "enabled": true, + "toMethod": "POST" + } + } + } + } + } +} +``` + +In this example the Request Method Transform middleware has been configured for requests to the `GET /status/200` endpoint. Any request received to that endpoint will be modified to `POST /status/200`. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the request method transform. + +### API Designer + +Adding the transform to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. 
**Select the Method Transform middleware** + + Select **ADD MIDDLEWARE** and choose the **Method Transform** middleware from the *Add Middleware* screen. + + Adding the Request Method Transform middleware + +3. **Configure the middleware** + + Select the new HTTP method to which requests to this endpoint should be transformed + + Selecting the new HTTP method for requests to the endpoint + + Select **ADD MIDDLEWARE** to apply the change to the middleware configuration. + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +Tyk's [request method transform](/api-management/traffic-transformation/request-method) middleware is configured at the endpoint level, where it modifies the HTTP method used in the request to a configured value. + +When working with Tyk Classic APIs the transformation is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#request-method-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring a Request Method Transform in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To configure a transformation of the request method you must add a new `method_transforms` object to the `extended_paths` section of your API definition. + +It has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `to_method`: The new HTTP method to which the request should be transformed + +All standard HTTP methods are supported: `GET`, `PUT`, `POST`, `PATCH`, `DELETE`, `HEAD`, `OPTIONS`. + +For example: +```json +{ + "method_transforms": [ + { + "path": "/status/200", + "method": "GET", + "to_method": "POST" + } + ] +} +``` + +In this example the Request Method Transform middleware has been configured for HTTP `GET` requests to the `/status/200` endpoint. 
Any request received to that endpoint will be modified to `POST /status/200`. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the request method transform middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the Method Transform plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to perform the transformation. Select the **Method Transform** plugin. + + Method Transform + +2. **Configure the transform** + + Then select the HTTP method to which you wish to transform the request. + + Method Path + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring a request method transform for an endpoint in Tyk Operator is similar to that defined in section configuring a Request Method Transform in the Tyk Classic API Definition. + +To configure a transformation of the request method you must add a new `method_transforms` object to the `extended_paths` section of your API definition: + +```yaml {linenos=true, linenostart=1, hl_lines=["26-29"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.default.svc:8000 + listen_path: /transform + strip_listen_path: true + version_data: + default_version: v1 + not_versioned: true + versions: + v1: + name: v1 + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + method_transforms: + - path: /anything + method: GET + to_method: POST +``` + +The example API Definition above configures an API to listen on path `/transform` and forwards requests upstream to http://httpbin.org. + +In this example the Request Method Transform middleware has been configured for `HTTP GET` requests to the `/anything` endpoint. 
Any request received to that endpoint will be modified to `POST /anything`. + diff --git a/api-management/traffic-transformation/request-size-limits.mdx b/api-management/traffic-transformation/request-size-limits.mdx new file mode 100644 index 000000000..78fe5ad32 --- /dev/null +++ b/api-management/traffic-transformation/request-size-limits.mdx @@ -0,0 +1,356 @@ +--- +title: "Request Size Limits" +description: "How to configure Request Size Limits traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Request Size Limits" +sidebarTitle: "Request Size Limits" +--- + +## Overview + +With Tyk, you can apply limits to the size of requests made to your HTTP APIs. You might use this feature to protect your Tyk Gateway or upstream services from excessive memory usage or brute force attacks. + +Tyk Gateway offers a flexible tiered system of limiting request sizes ranging from globally applied limits across all APIs deployed on the gateway down to specific size limits for individual API endpoints. + +### Use Case + +#### Protecting the entire Tyk Gateway from DDoS attacks +You can configure a system-wide request size limit that protects all APIs managed by the Tyk Gateway from being overwhelmed by excessively large requests, which could be part of a DDoS attack, ensuring the stability and availability of the gateway. + +#### Limiting request sizes for a lightweight microservice +You might expose an API for a microservice that is designed to handle lightweight, fast transactions and is not equipped to process large payloads. You can set an API-level size limit that ensures the microservice behind this API is not forced to handle requests larger than it is designed for, maintaining its performance and efficiency. + +#### Controlling the size of GraphQL queries +A GraphQL API endpoint might be susceptible to complex queries that can lead to performance issues. 
By setting a request size limit for the GraphQL endpoint, you ensure that overly complex queries are blocked, protecting the backend services from potential abuse and ensuring a smooth operation. + +#### Restricting upload size on a file upload endpoint +An API endpoint is designed to accept file uploads, but to prevent abuse, you want to limit the size of uploads to 1MB. To enforce this, you can enable the Request Size Limit middleware for this endpoint, configuring a size limit of 1MB. This prevents users from uploading excessively large files, protecting your storage and bandwidth resources. + +### Working + +Tyk compares each incoming API request with the configured maximum size for each level of granularity in order of precedence and will reject any request that exceeds the size you have set at any level of granularity, returning an HTTP 4xx error as detailed below. + +All size limits are stated in bytes and are applied only to the request _body_ (or payload), excluding the headers. + +| Precedence | Granularity | Error returned on failure | +| :------------ | :------------------ | :-------------------------------- | +| 1st | System (gateway) | `413 Request Entity Too Large` | +| 2nd | API | `400 Request is too large` | +| 3rd | Endpoint | `400 Request is too large` | + + + +The system level request size limit is the only size limit applied to [TCP](/key-concepts/tcp-proxy) and [Websocket](/advanced-configuration/websockets) connections. + + + +
+ +#### Applying a system level size limit +You can configure a request size limit (in bytes) that will be applied to all APIs on your Tyk Gateway by adding `max_request_body_size` to the `http_server_options` [element](/tyk-oss-gateway/configuration#http_server_optionsmax_request_body_size) of your `tyk.conf` Gateway configuration. For example: +```yaml +"max_request_body_size": 5000 +``` +A value of zero (default) means that no maximum is set and the system-wide size limit check will not be performed. + +This limit will be evaluated before API-level or endpoint-level configurations. If this test fails, the Tyk Gateway will return an error `HTTP 413 Request Entity Too Large`. + + + +Tyk Cloud Classic enforces a strict request size limit of 1MB on all inbound requests via our cloud architecture. This limit does not apply to Tyk Cloud users. + + + +
+ +If you're using Tyk OAS APIs, then you can find details and examples of how to configure an API or endpoint-level request size limit [here](#request-size-limits-using-tyk-oas). + +If you're using Tyk Classic APIs, then you can find details and examples of how to configure an API or endpoint-level request size limit [here](#request-size-limits-using-classic). + +{/* proposed "summary box" to be shown graphically on each middleware page + # Request Size Limit middleware summary + - The Request Size Limit middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Request Size Limit middleware can be configured at the system level within the Gateway config, or per-API or per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +## Using Tyk OAS + + +The [request size limit](/api-management/traffic-transformation/request-size-limits) middleware enables you to apply limits to the size of requests made to your HTTP APIs. You might use this feature to protect your Tyk Gateway or upstream services from excessive memory usage or brute force attacks. + +The middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#request-size-limits-using-classic) page. + +### API Definition + +There are three different levels of granularity that can be used when configuring a request size limit. 
+- [system-wide](#applying-a-system-level-size-limit): affecting all APIs deployed on the gateway
+- [API-level](#applying-a-size-limit-for-a-specific-api): affecting all endpoints for an API
+- [endpoint-level](#applying-a-size-limit-for-a-specific-endpoint): affecting a single API endpoint
+
+#### Applying a size limit for a specific API
+
+The API-level size limit has not yet been implemented for Tyk OAS APIs.
+
+You can work around this by implementing a combination of endpoint-level size limits and [allow](/api-management/traffic-transformation/allow-list#api-definition) or [block](/api-management/traffic-transformation/block-list#api-designer) lists.
+
+#### Applying a size limit for a specific endpoint
+
+The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards exist only to keep the paths human readable; they do not translate to variable names. Under the hood, a wildcard translates to the “match everything” regex of: `(.*)`.
+
+The request size limit middleware (`requestSizeLimit`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). 
+ +The `requestSizeLimit` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `value`: the maximum size permitted for a request to the endpoint (in bytes) + +For example: +```json {hl_lines=["39-44"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-request-size-limit", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "post": { + "operationId": "anythingpost", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-request-size-limit", + "state": { + "active": true, + "internal": false + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-request-size-limit/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingpost": { + "requestSizeLimit": { + "enabled": true, + "value": 100 + } + } + } + } + } +} +``` + +In this example the endpoint-level Request Size Limit middleware has been configured for HTTP `POST` requests to the `/anything` endpoint. For any call made to this endpoint, Tyk will check the size of the payload (Request body) and, if it is larger than 100 bytes, will reject the request, returning `HTTP 400 Request is too large`. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the virtual endpoint middleware. + +### API Designer + +Adding the Request Size Limit middleware to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint for the path** + + From the **API Designer** add an endpoint that matches the path for you want to limit the size of requests. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. 
**Select the Request Size Limit middleware** + + Select **ADD MIDDLEWARE** and choose the **Request Size Limit** middleware from the *Add Middleware* screen. + + Adding the Request Size Limit middleware + +3. **Configure the middleware** + + Now you can set the **size limit** that the middleware should enforce - remember that this is given in bytes. + + Setting the size limit that should be enforced + +4. **Save the API** + + Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [request size limit](/api-management/traffic-transformation/request-size-limits) middleware enables you to apply limits to the size of requests made to your HTTP APIs. You might use this feature to protect your Tyk Gateway or upstream services from excessive memory usage or brute force attacks. + +This middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#request-size-limits-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +There are three different levels of granularity that can be used when configuring a request size limit. 
+- [system-wide](#applying-a-system-level-size-limit): affecting all APIs deployed on the gateway +- [API-level](/api-management/traffic-transformation/request-headers#tyk-classic-api): affecting all endpoints for an API +- [endpoint-level](#tyk-classic-endpoint): affecting a single API endpoint + +#### Applying a size limit for a specific API + + +You can configure a request size limit (in bytes) to an API by configuring the `global_size_limit` within the `version` element of the API Definition, for example: +``` +"global_size_limit": 2500 +``` + +A value of zero (default) means that no maximum is set and the API-level size limit check will not be performed. + +This limit is applied for all endpoints within an API. It is evaluated after the Gateway-wide size limit and before any endpoint-specific size limit. If this test fails, the Tyk Gateway will report `HTTP 400 Request is too large`. + +#### Applying a size limit for a specific endpoint + + +The most granular control over request sizes is provided by the endpoint-level configuration. This limit will be applied after any Gateway-level or API-level size limits and is given in bytes. If this test fails, the Tyk Gateway will report `HTTP 400 Request is too large`. + +To enable the middleware you must add a new `size_limits` object to the `extended_paths` section of your API definition. + +The `size_limits` object has the following configuration: +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `size_limit`: the maximum size permitted for a request to the endpoint (in bytes) + +For example: +```.json {linenos=true, linenostart=1} +{ + "extended_paths": { + "size_limits": [ + { + "disabled": false, + "path": "/anything", + "method": "POST", + "size_limit": 100 + } + ] + } +} +``` + +In this example the endpoint-level Request Size Limit middleware has been configured for HTTP `POST` requests to the `/anything` endpoint. 
For any call made to this endpoint, Tyk will check the size of the payload (Request body) and, if it is larger than 100 bytes, will reject the request, returning `HTTP 400 Request is too large`. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure a request size limit for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to limit the size of requests. Select the **Request size limit** plugin. + + Select middleware + +2. **Configure the middleware** + + Set the request size limit, in bytes. + + Configure limit + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + + + + + The Tyk Classic API Designer does not provide an option to configure `global_size_limit`, but you can do this from the Raw Definition editor. + + + +### Tyk Operator + +The process for configuring a request size limit is similar to that defined in section configuring the middleware in the Tyk Classic API Definition. Tyk Operator allows you to configure a request size limit for [all endpoints of an API](#tyk-operator-api) or for a [specific API endpoint](#tyk-operator-endpoint). + +#### Applying a size limit for a specific API + + +{/* Need an example */} +The process for configuring the request size_limits middleware for a specific API is similar to that explained in [applying a size limit for a specific API](#tyk-classic-api). 
+ +You can configure a request size limit (in bytes) for all endpoints within an API by configuring the `global_size_limit` within the `version` element of the API Definition, for example: + +```yaml {linenos=true, linenostart=1, hl_lines=["19"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-global-limit +spec: + name: httpbin-global-limit + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-global-limit + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + global_size_limit: 5 + name: Default +``` + +The example API Definition above configures an API to listen on path `/httpbin-global-limit` and forwards requests upstream to http://httpbin.org. + +In this example the request size limit is set to 5 bytes. If the limit is exceeded then the Tyk Gateway will report `HTTP 400 Request is too large`. + +#### Applying a size limit for a specific endpoint + + +The process for configuring the request size_limits middleware for a specific endpoint is similar to that explained in [applying a size limit for a specific endpoint](#tyk-classic-endpoint). 
+ +To configure the request size_limits middleware you must add a new `size_limits` object to the `extended_paths` section of your API definition, for example: + +```yaml {linenos=true, linenostart=1, hl_lines=["22-25"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-limit +spec: + name: httpbin-limit + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-limit + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + extended_paths: + size_limits: + - method: POST + path: /post + size_limit: 5 +``` + +The example API Definition above configures an API to listen on path `/httpbin-limit` and forwards requests upstream to http://httpbin.org. + +In this example the endpoint-level Request Size Limit middleware has been configured for `HTTP POST` requests to the `/post` endpoint. For any call made to this endpoint, Tyk will check the size of the payload (Request body) and, if it is larger than 5 bytes, will reject the request, returning `HTTP 400 Request is too large`. \ No newline at end of file diff --git a/api-management/traffic-transformation/request-validation.mdx b/api-management/traffic-transformation/request-validation.mdx new file mode 100644 index 000000000..b9b89cdc1 --- /dev/null +++ b/api-management/traffic-transformation/request-validation.mdx @@ -0,0 +1,399 @@ +--- +title: "Request Validation" +description: "How to configure Request Validation traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Request Validation" +sidebarTitle: "Request Validation" +--- + +## Overview + + +Requests to your upstream services should meet the contract that you have defined for those APIs. 
Checking the content and format of incoming requests before they are passed to the upstream APIs can avoid unexpected errors and provide additional security to those services. Tyk's request validation middleware provides a way to validate the presence, correctness and conformity of HTTP requests to make sure they meet the expected format required by the upstream API endpoints. + +Request validation enables cleaner backend APIs, better standardization across consumers, easier API evolution and reduced failure risk leading to higher end-to-end reliability. + +### Use Cases + +#### Improving security of upstream services + +Validating incoming requests against a defined schema protects services from unintended consequences arising from bad input, such as SQL injection or buffer overflow errors, or other unintended failures caused by missing parameters or invalid data types. Offloading this security check to the API Gateway provides an early line of defense as potentially bad requests are not proxied to your upstream services. + +#### Offloading contract enforcement + +You can ensure that client requests adhere to a defined contract specifying mandatory headers or parameters before sending requests upstream. Performing these validation checks in the API Gateway allows API developers to focus on core domain logic. + +#### Supporting data transformation + +Validation goes hand-in-hand with request [header](/api-management/traffic-transformation/request-headers) and [body](/api-management/traffic-transformation/request-body) transformation by ensuring that a request complies with the expected schema prior to transformation. For example, you could validate that a date parameter is present, then transform it into a different date format as required by your upstream API dynamically on each request. + +### Working + +The incoming request is compared with a defined schema, which is a structured description of the expected format for requests to the endpoint. 
This request schema defines the required and optional elements such as headers, path/query parameters, payloads and their data types. It acts as a contract for clients. + +If the incoming request does not match the schema, it will be rejected with an `HTTP 422 Unprocessable Entity` error. This error code can be customized if required. + +When using [Tyk OAS APIs](/api-management/traffic-transformation/request-validation#request-validation-using-tyk-oas), request validation is performed by the `Validate Request` middleware which can be enabled per-endpoint. The schema against which requests are compared is defined in the OpenAPI description of the endpoint. All elements of the request can have a `schema` defined in the OpenAPI description so requests to Tyk OAS APIs can be validated for headers, path/query parameters and body (payload). + +When using the legacy [Tyk Classic APIs](/api-management/traffic-transformation/request-validation#request-validation-using-classic), request validation is performed by the `Validate JSON` middleware which can be enabled per-endpoint. The schema against which requests are compared is defined in the middleware configuration and is limited to the request body (payload). Request headers and path/query parameters cannot be validated when using Tyk Classic APIs. + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Validate Request middleware summary + - The Validate Request middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Validate Request middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +## Using Tyk OAS + + +The [request validation](#request-validation-overview) middleware provides a way to validate the presence, correctness and conformity of HTTP requests to make sure they meet the expected format required by the upstream API endpoints. If the incoming request fails validation, the Tyk Gateway will reject the request with an `HTTP 422 Unprocessable Entity` response. Tyk can be [configured](#configuring-the-request-validation-middleware) to return a different HTTP status code if required. + +The middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](/api-management/traffic-transformation/request-validation#request-validation-using-classic) page. + +### Request schema in OpenAPI Specification + +The OpenAPI Specification supports the definition of a [schema](https://learn.openapis.org/specification/content.html#the-schema-object) to describe and limit the content of any field in an API request or response. + +Tyk's request validation middleware automatically parses the schema for the request in the OpenAPI description part of the Tyk OAS API Definition and uses this to compare against the incoming request. 
+ +An OpenAPI schema can reference other schemas defined elsewhere, letting you write complex validations very efficiently since you don’t need to re-define the validation for a particular object every time you wish to refer to it. Tyk only supports local references to schemas (within the same OpenAPI document). + +As explained in the OpenAPI [documentation](https://learn.openapis.org/specification/parameters.html), the structure of an API request is described by two components: +- parameters (headers, query parameters, path parameters) +- request body (payload) + +#### Request parameters + +The `parameters` field in the OpenAPI description is an array of [parameter objects](https://swagger.io/docs/specification/describing-parameters/) that each describe one variable element in the request. Each `parameter` has two mandatory fields: +- `in`: the location of the parameter (`path`, `query`, `header`) +- `name`: a unique identifier within that location (i.e. no duplicate header names for a given operation/endpoint) + +There are also optional `description` and `required` fields. + +For each parameter, a schema can be declared that defines the `type` of data that can be stored (e.g. `boolean`, `string`) and any `example` or `default` values. + +##### Operation (endpoint-level) parameters + +An operation is a combination of HTTP method and path or, as Tyk calls it, an endpoint - for example `GET /users`. Operation, or endpoint-level parameters can be defined in the OpenAPI description and will apply only to that operation within the API. These can be added or modified within Tyk Dashboard's [API designer](#api-designer). + +##### Common (path-level) parameters + +[Common parameters](https://swagger.io/docs/specification/v3_0/describing-parameters/#common-parameters), that apply to all operations within a path, can be defined at the path level within the OpenAPI description. 
Tyk refers to these as path-level parameters and displays them as read-only fields in the Dashboard's API designer. If you need to add or modify common parameters you must use the *Raw Definition* editor, or edit your OpenAPI document outside Tyk and [update](/api-management/gateway-config-managing-oas#updating-an-api) the API. + +#### Request body + +The `requestBody` field in the OpenAPI description is a [Request Body Object](https://swagger.io/docs/specification/describing-request-body/). This has two optional fields (`description` and `required`) plus the `content` section which allows you to define a schema for the expected payload. Different schemas can be declared for different media types that are identified by content-type (e.g. `application/json`, `application/xml` and `text/plain`). + +### Configuring the request validation middleware + +When working with Tyk OAS APIs, the request validation middleware automatically determines the validation rules based on the API schema. The only configurable option for the middleware is to set the desired HTTP status code that will be returned if a request fails validation. The default response will be `HTTP 422 Unprocessable Entity` unless otherwise configured. + +### Enabling the request validation middleware + +If the middleware is enabled for an endpoint, then Tyk will automatically validate requests made to that endpoint against the schema defined in the API definition. + +When you create a Tyk OAS API by importing your OpenAPI description, you can instruct Tyk to enable request validation [automatically](#automatically-enabling-the-request-validation-middleware) for all endpoints with defined schemas. + +If you are creating your API without import, or if you only want to enable request validation for some endpoints, you can [manually enable](#manually-enabling-the-request-validation-middleware) the middleware in the Tyk OAS API definition. 
+ +#### Automatically enabling the request validation middleware + +The request validation middleware can be enabled for all endpoints that have defined schemas when [importing](/api-management/gateway-config-managing-oas#importing-an-openapi-description-to-create-an-api) an OpenAPI Document to create a Tyk OAS API. +- if you are using the `POST /apis/oas/import` endpoint in the [Tyk Dashboard API](/tyk-dashboard-api) or [Tyk Gateway API](/tyk-gateway-api) then you can do this by setting the `validateRequest=true` query parameter +- if you are using the API Designer, select the **Auto-generate middleware to validate requests** option on the **Import API** screen + +Select the option during OpenAPI import to validate requests + +As noted, the automatic application of request validation during import will apply the middleware to all endpoints declared in your OpenAPI description. If you want to adjust this configuration, for example to remove validation from specific endpoints or to change the HTTP status code returned on error, you can update the Tyk OAS API definition as described [here](#manually-enabling-the-request-validation-middleware). + +#### Manually enabling the request validation middleware + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards exist to make the paths human readable; they do not translate to variable names. Under the hood, a wildcard translates to the “match everything” regex of: `(.*)`. + +The request validation middleware (`validateRequest`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId`. 
The `operationId` for an endpoint can be found within the `paths` section of your [OpenAPI specification](https://swagger.io/docs/specification/paths-and-operations/?sbsearch=operationIds). + +The `validateRequest` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `errorResponseCode`: [optional] the HTTP status code to be returned if validation fails (this defaults to `HTTP 422 Unprocessable Entity` if not set) + +For example: +```json {hl_lines=["69-72"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-validate-request", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "parameters": [ + { + "in": "header", + "name": "X-Security", + "required": true, + "schema": { + "type": "boolean" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "properties": { + "firstname": { + "description": "The person's first name", + "type": "string" + }, + "lastname": { + "description": "The person's last name", + "type": "string" + } + }, + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-validate-request", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-validate-request/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingget": { + "validateRequest": { + "enabled": true, + "errorResponseCode": 400 + } + } + } + } + } +} +``` + +In this example the request validation middleware has been configured for requests to the `GET /anything` endpoint. The middleware will check for the existence of a header named `X-Security` and the request body will be validated against the declared schema. 
If there is no match, the request will be rejected and Tyk will return `HTTP 400` (as configured in `errorResponseCode`). + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the request validation middleware. + +### API Designer + +Adding and configuring Request Validation for your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Validate Request middleware** + + Select **ADD MIDDLEWARE** and choose **Validate Request** from the *Add Middleware* screen. + + Adding the Validate Request middleware + + The API Designer will show you the request body and request parameters schema detected in the OpenAPI description of the endpoint. + + Validate Request middleware schema is automatically populated + +3. **Configure the middleware** + + If required, you can select an alternative HTTP status code that will be returned if request validation fails. + + Configuring the Request Validation error response + +4. **Save the API** + + Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes. + + +## Using Classic + + +The [request validation](#request-validation-overview) middleware provides a way to validate the presence, correctness and conformity of HTTP requests to make sure they meet the expected format required by the upstream API endpoints. + +When working with legacy Tyk Classic APIs, request validation is performed by the `Validate JSON` middleware which can be enabled per-endpoint. 
The schema against which requests are compared is defined in the middleware configuration and is limited to the request body (payload). Request headers and path/query parameters cannot be validated when using Tyk Classic APIs. + +This middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](/api-management/traffic-transformation/request-validation#request-validation-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To enable the middleware you must add a new `validate_json` object to the `extended_paths` section of your API definition. + +The `validate_json` object has the following configuration: + +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `schema`: the [JSON schema](https://json-schema.org/understanding-json-schema/basics) against which the request body will be compared +- `error_response_code`: the HTTP status code that will be returned if validation fails (defaults to `422 Unprocessable Entity`) + +For example: + +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "validate_json": [ + { + "disabled": false, + "path": "/register", + "method": "POST", + "schema": { + "type": "object", + "properties": { + "firstname": { + "type": "string", + "description": "The person's first name" + }, + "lastname": { + "type": "string", + "description": "The person's last name" + } + } + }, + "error_response_code": 422 + } + ] + } +} +``` + +In this example the Validate JSON middleware has been configured for requests to the `POST /register` endpoint. For any call made to this endpoint, Tyk will compare the request body with the schema and, if it does not match, the request will be rejected with the error code `HTTP 422 Unprocessable Entity`. 
+ +#### Understanding JSON Schema Version Handling + +The Gateway automatically detects the version of the JSON schema from the `$schema` field in your schema definition. This field specifies the version of the [JSON schema standard](https://json-schema.org/specification-links) to be followed. + +From Tyk 5.8 onwards, supported versions are [draft-04](https://json-schema.org/draft-04/schema), [draft-06](https://json-schema.org/draft-06/schema) and [draft-07](https://json-schema.org/draft-07/schema). + +In previous versions of Tyk, only [draft-04](https://json-schema.org/draft-04/schema) is supported. If you downgrade from Tyk 5.8 to an earlier version, please check that your JSON schema remains valid for that version, as you might experience unexpected behaviour if it uses features from newer drafts of the JSON schema standard. + +- If the `$schema` field is present, the Gateway strictly follows the rules of the specified version. +- If the `$schema` field is missing or the version is not specified, the Gateway uses a hybrid mode that combines features from multiple schema versions. This mode ensures that the validation will still work, but may not enforce the exact rules of a specific version. + +To ensure consistent and predictable validation, it is recommended to always include the `$schema` field in your schema definition. For example: + +```json +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "firstname": { + "type": "string" + }, + "lastname": { + "type": "string" + } + } +} +``` + +By including `$schema`, the validator can operate in strict mode, ensuring that the rules for your chosen schema version are followed exactly. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the request validation middleware for your Tyk Classic API by following these steps. + +1. 
**Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to validate the request payload. Select the **Validate JSON** plugin. + + validate json plugin + +2. **Configure the middleware** + + Once you have selected the request validation middleware for the endpoint, you can select an error code from the drop-down list (if you don't want to use the default `422 Unprocessable Entity`) and enter your JSON schema in the editor. + + Adding schema to the Validate JSON middleware + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring the middleware in Tyk Operator is similar to that explained in configuring the middleware in the Tyk Classic API Definition. To configure the request validation middleware you must add a new `validate_json` object to the `extended_paths` section of your API definition, for example: + +The example API Definition below configures an API to listen on path `/httpbin` and forwards requests upstream to http://httpbin.org. + +In this example, the Validate JSON middleware has been configured for requests to the `GET /get` endpoint. For any call made to this endpoint, Tyk will compare the request body with the schema and, if it does not match, the request will be rejected with the error code `HTTP 422 Unprocessable Entity`. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["26-41"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-json-schema-validation +spec: + name: httpbin-json-schema-validation + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + validate_json: + - error_response_code: 422 + disabled: false + path: /get + method: GET + schema: + properties: + userName: + type: string + minLength: 2 + age: + type: integer + minimum: 1 + required: + - userName + type: object +``` + diff --git a/api-management/traffic-transformation/response-body.mdx b/api-management/traffic-transformation/response-body.mdx new file mode 100644 index 000000000..012d61cec --- /dev/null +++ b/api-management/traffic-transformation/response-body.mdx @@ -0,0 +1,482 @@ +--- +title: "Response Body" +description: "How to configure Response Body traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Response Body" +sidebarTitle: "Response Body" +--- + +## Overview + +Tyk enables you to modify the payload of API responses received from your upstream services before they are passed on to the client that originated the request. This makes it easy to transform between payload data formats or to expose legacy APIs using newer schema models without having to change any client implementations. This middleware is only applicable to endpoints that return a body with the response. + +With the body transform middleware you can modify XML or JSON formatted payloads to ensure that the response contains the information required by your upstream service. 
You can enrich the response by adding contextual data that is held by Tyk but not included in the original response from the upstream. + +This middleware changes only the payload and not the headers. You can, however, combine this with the [Response Header Transform](/api-management/traffic-transformation/response-headers) to apply more complex transformation to responses. + +There is a closely related [Request Body Transform](/api-management/traffic-transformation/request-body) middleware that provides the same functionality on the request sent by the client prior to it being proxied to the upstream. + +### Use Cases + +#### Maintaining compatibility with legacy clients + +Sometimes you might have a legacy API and need to migrate the transactions to a new upstream service but do not want to upgrade all the existing clients to the newer upstream API. Using response body transformation, you can convert the new format that your upstream services provide into legacy XML or JSON expected by the clients. + +#### Shaping responses for different devices + +You can detect the client device types via headers or context variables and transform the response payload to optimize it for that particular device. For example, you might optimize the response content for mobile apps. + +#### SOAP to REST translation + +A common use of the response body transform middleware is when surfacing a legacy SOAP service with a REST API. Full details of how to perform this conversion using Tyk are provided [here](/advanced-configuration/transform-traffic/soap-rest). + +### Working + +Tyk's body transform middleware uses the [Go template language](https://golang.org/pkg/text/template/) to parse and modify the provided input. We have bundled the [Sprig Library (v3)](http://masterminds.github.io/sprig/) which provides over 70 pre-written functions for transformations to assist the creation of powerful Go templates to transform your API responses. 
+ +The Go template can be defined within the API Definition or can be read from a file that is accessible to Tyk, for example alongside your [error templates](/api-management/gateway-events#error-templates). + +We have provided more detail, links to reference material and some examples of the use of Go templating [here](/api-management/traffic-transformation/go-templates). + + + +Tyk evaluates templates stored in files on startup, so if you make changes to a template you must remember to restart the gateway. + + + +#### Supported response body formats + +The body transformation middleware can modify response payloads in the following formats: +- JSON +- XML + +When working with JSON format data, the middleware will unmarshal the data into a data structure, and then make that data available to the template in dot-notation. + +#### Data accessible to the middleware + +The middleware has direct access to the response body and also to dynamic data as follows: +- [Context variables](/api-management/traffic-transformation/request-context-variables), extracted from the request at the start of the middleware chain, can be injected into the template using the `._tyk_context.KEYNAME` namespace +- [Session metadata](/api-management/policies#what-is-a-session-metadata), from the Tyk Session Object linked to the request, can be injected into the template using the `._tyk_meta.KEYNAME` namespace +- Inbound form or query data can be accessed through the `._tyk_context.request_data` namespace where it will be available as a `key:[]value` map +- values from [key-value (KV) storage](/tyk-configuration-reference/kv-store#transformation-middleware) can be injected into the template using the notation appropriate to the location of the KV store + +The response body transform middleware can iterate through list indices in dynamic data so, for example, calling `{{ index ._tyk_context.request_data.variablename 0 }}` in a template will expose the first entry in the 
`request_data.variablename` key/value array. + + + +As explained in the [documentation](https://pkg.go.dev/text/template), templates are executed by applying them to a data structure. The template receives the decoded JSON or XML of the response body. If session variables or metadata are enabled, additional fields will be provided: `_tyk_context` and `_tyk_meta` respectively. + + + +#### Automatic XML <-> JSON Transformation + +A very common transformation that is applied in the API Gateway is to convert between XML and JSON formatted body content. + +The Response Body Transform supports two helper functions that you can use in your Go templates to facilitate this: +- `jsonMarshal` performs JSON style character escaping on an XML field and, for complex objects, serialises them to a JSON string ([example](/api-management/traffic-transformation/go-templates#xml-to-json-conversion-using-jsonmarshal)) +- `xmlMarshal` performs the equivalent conversion from JSON to XML ([example](/api-management/traffic-transformation/go-templates#json-to-xml-conversion-using-xmlmarshal)) + +
+ + +{/* proposed "summary box" to be shown graphically on each middleware page + # Response Body Transform middleware summary + - The Response Body Transform middleware is an optional stage in Tyk's API Response processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Response Body Transform middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. + - Response Body Transform can access both [session metadata](/api-management/policies#what-is-a-session-metadata) and [request context variables](/api-management/traffic-transformation/request-context-variables). */} + + +## Using Tyk OAS + + +The [response body transform](/api-management/traffic-transformation/response-body) middleware provides a way to modify the payload of API responses before they are returned to the client. + +The middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#response-body-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards exist to make the paths human readable; they do not translate to variable names. Under the hood, a wildcard translates to the “match everything” regex of: `(.*)`. 
+ +The response body transformation middleware (`transformResponseBody`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +The `transformResponseBody` object has the following configuration: +- `enabled`: enable the middleware for the endpoint +- `format`: the format of input data the parser should expect (either `xml` or `json`) +- `body`: [see note] this is a `base64` encoded representation of your template +- `path`: [see note] this is the path to the text file containing the template + + + + + You should configure only one of `body` or `path` to indicate whether you are embedding the template within the middleware or storing it in a text file. The middleware will automatically select the correct source based on which of these fields you complete. If both are provided, then `body` will take precedence and `path` will be ignored. 
+ + + +For example: +```json {hl_lines=["39-43"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-response-body-transform", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "put": { + "operationId": "anythingput", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-response-body-transform", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-response-body-transform/", + "strip": true + } + }, + "middleware": { + "operations": { + "anythingput": { + "transformResponseBody": { + "enabled": true, + "format": "json", + "body": "ewogICJ2YWx1ZTEiOiAie3sudmFsdWUyfX0iLAogICJ2YWx1ZTIiOiAie3sudmFsdWUxfX0iLAogICJyZXEtaGVhZGVyIjogInt7Ll90eWtfY29udGV4dC5oZWFkZXJzX1hfSGVhZGVyfX0iLAogICJyZXEtcGFyYW0iOiAie3suX3R5a19jb250ZXh0LnJlcXVlc3RfZGF0YS5wYXJhbX19Igp9" + } + } + } + } + } +} +``` + +In this example the response body transform middleware has been configured for requests to the `PUT /anything` endpoint. The `body` contains a base64 encoded Go template (which you can check by pasting the value into a service such as [base64decode.org](https://www.base64decode.org)). 
+ +Decoded, this template is: +```go +{ + "value1": "{{.value2}}", + "value2": "{{.value1}}", + "req-header": "{{._tyk_context.headers_X_Header}}", + "req-param": "{{._tyk_context.request_data.param}}" +} +``` + +So if you make a request to `PUT /anything?param=foo`, configuring a header `X-Header`:`bar` and providing this payload: +```json +{ + "value1": "world", + "value2": "hello" +} +``` + +httpbin.org will respond with the original payload in the response and, if you do not have the response body transform middleware enabled, the response from Tyk will include: +```json +{ + "value1": "world", + "value2": "hello" +} +``` + +If, however, you enable the response body transform middleware, Tyk will modify the response to include this content: +```json +{ + "req-header": "bar", + "req-param": "[foo]", + "value1": "hello", + "value2": "world" +} +``` + +You can see that Tyk has swapped `value1` and `value2` and embedded the `X-Header` header and `param` query values from the request into the body of the response. + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the response body transform middleware. + + + +If using a template in a file (i.e. you configure `path` in the `transformResponseBody` object), remember that Tyk will load and evaluate the template when the Gateway starts up. If you modify the template, you will need to restart Tyk in order for the changes to take effect. + + + +### API Designer + +Adding Response Body Transformation to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. 
**Select the Response Body Transform middleware** + + Select **ADD MIDDLEWARE** and choose the **Response Body Transform** middleware from the *Add Middleware* screen. + + Adding the Response Body Transform middleware + +3. **Configure the middleware** + + Now you can select the response body format (JSON or XML) and add either a path to the file containing the template, or directly enter the transformation template in the text box. + + Configuring the Response Body Transform middleware + + The **Test with data** control will allow you to test your body transformation function by providing an example response body and generating the output from the transform. It is not possible to configure headers, other request parameters, context or session metadata to this template test so if you are using these data sources in your transform it will not provide a complete output, for example: + + Testing the Response Body Transform + +4. **Save the API** + + Select **SAVE API** to apply the changes to your API. + +## Using Classic + + +The [response body transform](/api-management/traffic-transformation/response-body) middleware provides a way to modify the payload of API responses before they are returned to the client. + +This middleware is configured in the Tyk Classic API Definition at the endpoint level. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#response-body-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +To enable the middleware you must add a new `transform_response` object to the `extended_paths` section of your API definition. 
+ +The `transform_response` object has the following configuration: +- `path`: the path to match on +- `method`: the method to match on +- `template_data`: details of the Go template to be applied for the transformation of the response body + +The Go template is described in the `template_data` object by the following fields: +- `input_type`: the format of input data the parser should expect (either `xml` or `json`) +- `enable_session`: set this to `true` to make session metadata available to the transform template +- `template_mode`: instructs the middleware to look for the template either in a `file` or in a base64 encoded `blob`; the actual file location (or base64 encoded template) is provided in `template_source` +- `template_source`: if `template_mode` is set to `file`, this will be the path to the text file containing the template; if `template_mode` is set to `blob`, this will be a `base64` encoded representation of your template + +For example: +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "transform_response": [ + { + "path": "/anything", + "method": "POST", + "template_data": { + "template_mode": "file", + "template_source": "./templates/transform_test.tmpl", + "input_type": "json", + "enable_session": true + } + } + ] + } +} +``` + +In this example, the Response Body Transform middleware is directed to use the template located in the `file` at location `./templates/transform_test.tmpl`. The input (pre-transformation) response payload will be `json` format and session metadata will be available for use in the transformation. + + + +Tyk will load and evaluate the template file when the Gateway starts up. If you modify the template, you will need to restart Tyk in order for the changes to take effect. + + + + + +Prior to Tyk 5.3, there was an additional step to enable response body transformation. 
You would need to add the following to the Tyk Classic API definition: + +```json +{ + "response_processors":[ + {"name": "response_body_transform"} + ] +} +``` + +If using the Endpoint Designer in the Tyk Dashboard, this would be added automatically. + +We removed the need to configure the `response_processors` element in Tyk 5.3.0. + + + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the response body transform middleware for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to perform the transformation. Select the **Body Transforms** plugin. + + Endpoint designer + +2. **Configure the middleware** + + Ensure that you have selected the `RESPONSE` tab, then select your input type, and then add the template you would like to use to the **Template** input box. + + Setting the body response transform + +3. **Test the Transform** + + If you have sample input data, you can use the Input box to add it, and then test it using the **Test** button. You will see the effect of the template on the sample input in the Output box. + + Testing the body transform function + +4. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the Response Body Transform middleware. + +### Tyk Operator + +The process of configuring a transformation of a response body for a specific endpoint is similar to that defined in section configuring the middleware in the Tyk Classic API Definition for the Tyk Classic API definition. To enable the middleware you must add a new `transform_response` object to the `extended_paths` section of your API definition. + +In the examples below, the Response Body Transform middleware (`transform_response`) is directed to use the template located in the `template_source`, decoding the xml in the base64 encoded string. 
The input (pre-transformation) response payload will be `xml` format and there is no session metadata provided for use in the transformation. + +#### Example + +```yaml {linenos=true, linenostart=1, hl_lines=["45-53"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-transform +spec: + name: httpbin-transform + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-transform + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + transform: + - method: POST + path: /anything + template_data: + enable_session: false + input_type: json + template_mode: blob + # base64 encoded template + template_source: eyJiYXIiOiAie3suZm9vfX0ifQ== + transform_headers: + - delete_headers: + - "remove_this" + add_headers: + foo: bar + path: /anything + method: POST + transform_response: + - method: GET + path: /xml + template_data: + enable_session: false + input_type: xml + template_mode: blob + # base64 encoded template + template_source: e3sgLiB8IGpzb25NYXJzaGFsIH19 + transform_response_headers: + - method: GET + path: /xml + add_headers: + Content-Type: "application/json" + act_on: false + delete_headers: [] +``` + +#### Tyk Gateway < 5.3.0 Example + +If using Tyk Gateway < v5.3.0 then a `response_processor` object must be added to the API definition containing a `response_body_transform` item, as highlighted below: + +```yaml {linenos=true, linenostart=1, hl_lines=["17-18", "48-56"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-transform +spec: + name: httpbin-transform + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-transform + strip_listen_path: true + response_processors: + - name: response_body_transform + 
- name: header_injector + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + transform: + - method: POST + path: /anything + template_data: + enable_session: false + input_type: json + template_mode: blob + # base64 encoded template + template_source: eyJiYXIiOiAie3suZm9vfX0ifQ== + transform_headers: + - delete_headers: + - "remove_this" + add_headers: + foo: bar + path: /anything + method: POST + transform_response: + - method: GET + path: /xml + template_data: + enable_session: false + input_type: xml + template_mode: blob + # base64 encoded template + template_source: e3sgLiB8IGpzb25NYXJzaGFsIH19 + transform_response_headers: + - method: GET + path: /xml + add_headers: + Content-Type: "application/json" + act_on: false + delete_headers: [] +``` + diff --git a/api-management/traffic-transformation/response-headers.mdx b/api-management/traffic-transformation/response-headers.mdx new file mode 100644 index 000000000..473a9b692 --- /dev/null +++ b/api-management/traffic-transformation/response-headers.mdx @@ -0,0 +1,670 @@ +--- +title: "Response Headers" +description: "How to configure Response Headers traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Response Headers" +sidebarTitle: "Response Headers" +--- + +## Overview + +Tyk enables you to modify header information when a response is proxied back to the client. This can be very useful in cases where you have an upstream API that potentially exposes sensitive headers that you need to remove. 
+ +There are two options for this: +- API-level modification that is applied to responses for all requests to the API +- endpoint-level modification that is applied only to responses for requests to a specific endpoint + +With the header transform middleware you can append or delete any number of headers to ensure that the response contains the information required by your client. You can enrich the response by adding contextual data that is held by Tyk but not included in the original response from the upstream. + +This middleware changes only the headers and not the payload. You can, however, combine this with the [Response Body Transform](/api-management/traffic-transformation/response-body) to apply more complex transformation to responses. + +There are related [Request Header Transform](/api-management/traffic-transformation/request-headers) middleware (at API-level and endpoint-level) that provide the same functionality on the request from a client, prior to it being proxied to the upstream. + +### Use Cases + +#### Customizing responses for specific clients + +A frequent use case for response header transformation is when a client requires specific headers for their application to function correctly. For example, a client may require a specific header to indicate the status of a request or to provide additional information about the response. + +#### Adding security headers + +The response header transform allows you to add security headers to the response to protect against common attacks such as cross-site scripting (XSS) and cross-site request forgery (CSRF). Some security headers may be required for compliance with industry standards and, if not provided by the upstream, can be added by Tyk before forwarding the response to the client. + +#### Adding metadata to response headers + +Adding metadata to response headers can be useful for tracking and analyzing API usage, as well as for providing additional information to clients. 
For example, you may want to add a header that indicates the version of the API being used or the time taken to process the request. + +#### Modifying response headers for dynamic performance optimization + +You can use response header transformation to dynamically optimize the performance of the API. For example, you may want to indicate to the client the maximum number of requests that they can make in a given time period. By doing so through the response headers, you can perform dynamic optimization of the load on the upstream service without triggering the rate limiter and so avoiding errors being sent to the client. + +### Working + +The response header transform can be applied per-API or per-endpoint; each has a separate entry in the API definition so that you can configure both API-level and endpoint-level transforms for a single API. + +The middleware is configured with a list of headers to delete from the response and a list of headers to add to the response. Each header to be added to the response is configured as a key:value pair. +- the "delete header" functionality is intended to ensure that any header in the delete list is not present once the middleware completes. If a header in the delete list is not present in the upstream response, the middleware will ignore the omission +- the "add header" functionality will capitalize any header name provided. For example, if you configure the middleware to append `x-request-id` it will be added to the response as `X-Request-Id` + +In the response middleware chain, the endpoint-level transform is applied before the API-level transform. Subsequently, if both middleware are enabled, the API-level transform will operate on the headers that have been added by the endpoint-level transform (and will not have access to those that have been deleted by it). + +#### Injecting dynamic data into headers + +You can enrich the response headers by injecting data from context variables or session objects into the headers. 
+- [context variables](/api-management/traffic-transformation/request-context-variables), extracted from the request at the start of the middleware chain, can be injected into added headers using the `$tyk_context.` namespace +- [session metadata](/api-management/policies#what-is-a-session-metadata), from the Tyk Session Object linked to the request, can be injected into added headers using the `$tyk_meta.` namespace +- values from [key-value (KV) storage](/tyk-configuration-reference/kv-store#transformation-middleware) can be injected into added headers using the notation appropriate to the location of the KV store + +
+ +{/* proposed "summary box" to be shown graphically on each middleware page + # Response Header Transform middleware summary + - The Response Header Transform is an optional stage in Tyk's API Response processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Response Header Transform can be configured at the per-endpoint or per-API level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + +## Using Tyk OAS + + +Tyk's [response header transform](/api-management/traffic-transformation/response-headers) middleware enables you to append or delete headers on responses received from the upstream service before sending them to the client. + +There are two options for this: +- API-level modification that is applied to all responses for the API +- endpoint-level modification that is applied only to responses from a specific endpoint + + + + + If both API-level and endpoint-level middleware are configured, the endpoint-level transformation will be applied first. + + + +When working with Tyk OAS APIs the transformation is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#response-headers-using-classic) page. + +### API Definition + +The API-level and endpoint-level response header transforms have a common configuration but are configured in different sections of the API definition. + +#### API-level transform + +To append headers to, or delete headers from, responses from all endpoints defined for your API you must add a new `transformResponseHeaders` object to the `middleware.global` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition. + +You only need to enable the middleware (set `enabled:true`) and then configure the details of headers to `add` and those to `remove`. 
+ +For example: +```json {hl_lines=["38-57"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-response-header", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/status/200": { + "get": { + "operationId": "status/200get", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-response-header", + "state": { + "active": true + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-response-header/", + "strip": true + } + }, + "middleware": { + "global": { + "transformResponseHeaders": { + "enabled": true, + "remove": [ + "X-Secret" + ], + "add": [ + { + "name": "X-Static", + "value": "foobar" + }, + { + "name": "X-Request-ID", + "value": "$tyk_context.request_id" + }, + { + "name": "X-User-ID", + "value": "$tyk_meta.uid" + } + ] + } + } + } + } +} +``` + +This configuration will add three new headers to each response: +- `X-Static` with the value `foobar` +- `X-Request-ID` with a dynamic value taken from the `request_id` [context variable](/api-management/traffic-transformation/request-context-variables) +- `X-User-ID` with a dynamic value taken from the `uid` field in the [session metadata](/api-management/policies#what-is-a-session-metadata) + +It will also delete one header (if present) from each response: +- `X-Secret` + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the API-level response header transform. + +#### Endpoint-level transform + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. 
These wildcards are purely descriptive, to keep the path human readable; they do not translate to variable names. Under the hood, a wildcard translates to the "match everything" regex of: `(.*)`.
+
+The response header transform middleware (`transformResponseHeaders`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document).
+
+You only need to enable the middleware (set `enabled:true`) and then configure the details of headers to `add` and those to `remove`.
+
+For example:
+```json {hl_lines=["39-50"],linenos=true, linenostart=1}
+{
+  "components": {},
+  "info": {
+    "title": "example-response-method",
+    "version": "1.0.0"
+  },
+  "openapi": "3.0.3",
+  "paths": {
+    "/status/200": {
+      "get": {
+        "operationId": "status/200get",
+        "responses": {
+          "200": {
+            "description": ""
+          }
+        }
+      }
+    }
+  },
+  "x-tyk-api-gateway": {
+    "info": {
+      "name": "example-response-method",
+      "state": {
+        "active": true
+      }
+    },
+    "upstream": {
+      "url": "http://httpbin.org/"
+    },
+    "server": {
+      "listenPath": {
+        "value": "/example-response-method/",
+        "strip": true
+      }
+    },
+    "middleware": {
+      "operations": {
+        "status/200get": {
+          "transformResponseHeaders": {
+            "enabled": true,
+            "remove": [
+              "X-Static"
+            ],
+            "add": [
+              {
+                "name": "X-Secret",
+                "value": "the-secret-key-is-secret"
+              }
+            ]
+          }
+        }
+      }
+    }
+  }
+}
+```
+
+In this example the Response Header Transform middleware has been configured for HTTP `GET` requests to the `/status/200` endpoint. Any response received from the upstream service following a request to that endpoint will have the `X-Static` header removed and the `X-Secret` header added (with its value set to `the-secret-key-is-secret`).
+
+The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the endpoint-level response header transform.
+ +#### Combining API-level and Endpoint-level transforms + +If the example [API-level](#api-level-transform) and [endpoint-level](#endpoint-level-transform) transforms are applied to the same API, then the `X-Secret` header will be added (by the endpoint-level transform first) and then removed (by the API-level transform). Subsequently, the result of the two transforms for a call to `GET /status/200` would be to add four headers: +- `X-Request-ID` +- `X-User-ID` +- `X-Static` +- `X-New` + +### API Designer + +Adding and configuring the transforms to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +#### Adding an API-level transform + +From the **API Designer** on the **Settings** tab, after ensuring that you are in *edit* mode, toggle the switch to **Enable Transform response headers** in the **Middleware** section: +Tyk OAS API Designer showing API-level Response Header Transform + +Then select **NEW HEADER** as appropriate to add or remove a header from API responses. You can add or remove multiple headers by selecting **ADD HEADER** to add another to the list: +Configuring the API-level Response Header Transform in Tyk OAS API Designer + +#### Adding an endpoint level transform + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Response Header Transform middleware** + + Select **ADD MIDDLEWARE** and choose the **Response Header Transform** middleware from the *Add Middleware* screen. + + Adding the URL Rewrite middleware + +3. 
**Configure header transformation** + + Select **NEW HEADER** to configure a header to be added to or removed from the response, you can add multiple headers to either list by selecting **NEW HEADER** again. + + Configuring the rewrite rules for Advanced Triggers + Configuring the Response Header Transform + +4. **Save the API** + + Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes. + +## Using Classic + + +Tyk's [response header transform](/api-management/traffic-transformation/response-headers) middleware enables you to append or delete headers on responses received from the upstream service before sending them to the client. + +There are two options for this: +- API-level modification that is applied to all responses for the API +- endpoint-level modification that is applied only to responses from a specific endpoint + + + + + If both API-level and endpoint-level middleware are configured, the endpoint-level transformation will be applied first. + + + +When working with Tyk Classic APIs the transformation is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you want to use dynamic data from context variables, you must [enable](/api-management/traffic-transformation/request-context-variables#enabling-context-variables-for-use-with-tyk-classic-apis) context variables for the API to be able to access them from the response header transform middleware. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#response-headers-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the Response Header Transform in Tyk Operator](#tyk-operator) section below. + +### API Definition + +The API-level and endpoint-level response header transforms have a common configuration but are configured in different sections of the API definition. 
+ + +Prior to Tyk 5.3.0, there was an additional step to enable response header transforms (both API-level and endpoint-level). You would need to add the following to the Tyk Classic API definition: + +```json +{ + "response_processors":[ + {"name": "header_injector"} + ] +} +``` + +If using the Endpoint Designer in the Tyk Dashboard, this would be added automatically. + +We removed the need to configure the `response_processors` element in Tyk 5.3.0. + + + +#### API-level transform + + +To **append** headers to all responses from your API (i.e. for all endpoints) you must add a new `global_response_headers` object to the `versions` section of your API definition. This contains a list of key:value pairs, being the names and values of the headers to be added to responses. + +To **delete** headers from all responses from your API (i.e. for all endpoints), you must add a new `global_response_headers_remove` object to the `versions` section of the API definition. This contains a list of the names of existing headers to be removed from responses. 
+
+For example:
+```json {linenos=true, linenostart=1}
+{
+  "version_data": {
+    "versions": {
+      "Default": {
+        "global_response_headers": {
+          "X-Static": "foobar",
+          "X-Request-ID": "$tyk_context.request_id",
+          "X-User-ID": "$tyk_meta.uid"
+        },
+        "global_response_headers_remove": [
+          "X-Secret"
+        ]
+      }
+    }
+  }
+}
+```
+
+This configuration will add three new headers to each response:
+- `X-Static` with the value `foobar`
+- `X-Request-ID` with a dynamic value taken from the `request_id` [context variable](/api-management/traffic-transformation/request-context-variables)
+- `X-User-ID` with a dynamic value taken from the `uid` field in the [session metadata](/api-management/policies#what-is-a-session-metadata)
+
+It will also delete one header (if present) from each response:
+ - `X-Secret`
+
+#### Endpoint-level transform
+<a id="tyk-classic-endpoint"></a>
+
+To configure response header transformation for a specific endpoint you must add a new `transform_response_headers` object to the `extended_paths` section of your API definition.
+
+It has the following configuration:
+- `path`: the endpoint path
+- `method`: the endpoint HTTP method
+- `delete_headers`: a list of the headers that should be deleted from the response
+- `add_headers`: a list of headers, in key:value pairs, that should be added to the response
+
+For example:
+```json {linenos=true, linenostart=1}
+{
+  "transform_response_headers": [
+    {
+      "path": "status/200",
+      "method": "GET",
+      "delete_headers": ["X-Static"],
+      "add_headers": [
+        {"X-Secret": "the-secret-key-is-secret"},
+        {"X-New": "another-header"}
+      ]
+    }
+  ]
+}
+```
+
+In this example the Response Header Transform middleware has been configured for HTTP `GET` requests to the `/status/200` endpoint. Any response received from the upstream service following a request to that endpoint will have the `X-Static` header removed and the `X-Secret` and `X-New` headers added (with values set to `the-secret-key-is-secret` and `another-header`).
+ +#### Combining API-level and Endpoint-level transforms + +If the example [API-level](#api-level-transform) and [endpoint-level](#endpoint-level-transform) transforms are applied to the same API, then the `X-Secret` header will be added (by the endpoint-level transform first) and then removed (by the API-level transform). Subsequently, the result of the two transforms for a call to `GET /status/200` would be to add four headers: +- `X-Request-ID` +- `X-User-ID` +- `X-Static` +- `X-New` + +#### Fixing response headers that leak upstream server data + +A middleware called `header_transform` was added in Tyk 2.1 specfically to allow you to ensure that headers such as `Location` and `Link` reflect the outward facade of your API Gateway and also align with the expected response location to be terminated at the gateway, not the hidden upstream proxy. + +This is configured by adding a new `rev_proxy_header_cleanup` object to the `response_processors` section of your API definition. + +It has the following configuration: +- `headers`: a list of headers in the response that should be modified +- `target_host`: the value to which the listed headers should be updated + +For example: +```json +{ + "response_processors": [ + { + "name": "header_transform", + "options": { + "rev_proxy_header_cleanup": { + "headers": ["Link", "Location"], + "target_host": "http://TykHost:TykPort" + } + } + } + ] +} +``` + +In this example, the `Link` and `Location` headers will be modified from the server-generated response, with the protocol, domain and port of the value set in `target_host`. + +This feature is rarely used and has not been implemented in the Tyk Dashboard UI, nor in the [Tyk OAS API](#response-headers-using-tyk-oas). + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure the response header transform middleware for your Tyk Classic API by following these steps. 
+ +#### API-level transform + +Configuring the API-level response header transform middleware is very simple when using the Tyk Dashboard. + +In the Endpoint Designer you should select the **Global Version Settings** and ensure that you have selected the **Response Headers** tab: + +Configuring the API-level response header transform + +Note that you must click **ADD** to add a header to the list (for appending or deletion). + +#### Endpoint-level transform + +1. **Add an endpoint for the path and select the Header Transform plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to perform the transformation. Select the **Modify Headers** plugin. + + Adding the Modify Headers plugin to an endpoint + +2. **Select the "Response" tab** + + This ensures that the transform will be applied to responses prior to them being sent to the client. + + Selecting the response header transform + +3. **Declare the headers to be modified** + + Select the headers to delete and insert using the provided fields. You need to click **ADD** to ensure they are added to the list. + + Configuring the response header transform + +4. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the middleware. + +### Tyk Operator + +The process for configuring a response header transform in Tyk Operator is similar to that defined in section configuring the Response Header Transform in the Tyk Classic API Definition. Tyk Operator allows you to configure a response header transformation for [all endpoints of an API](#tyk-operator-endpoint) or for a [specific API endpoint](#tyk-operator-api). + +#### API-level transform + + +The process of configuring transformation of response headers for a specific API in Tyk Operator is similar to that defined in section [API-level transform](#tyk-classic-api) for the Tyk Classic API definition. + +To **append** headers to all responses from your API (i.e. 
for all endpoints) you must add a new `global_response_headers` object to the `versions` section of your API definition. This contains a list of key:value pairs, being the names and values of the headers to be added to responses. + +To **delete** headers from all responses from your API (i.e. for all endpoints), you must add a new `global_response_headers_remove` object to the `versions` section of the API definition. This contains a list of the names of existing headers to be removed from responses. + +An example is listed below: + +```yaml {linenos=true, linenostart=1, hl_lines=["25-30"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-global-header +spec: + name: httpbin-global-header + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-global-header + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + global_response_headers: + X-Static: foobar + X-Request-ID: "$tyk_context.request_id" + X-User-ID: "$tyk_meta.uid" + global_response_headers_remove: + - X-Secret +``` + +The example API Definition above configures an API to listen on path `/httpbin-global-header` and forwards requests upstream to http://httpbin.org. 
+ +This configuration will add three new headers to each response: + +- `X-Static` with the value `foobar` +- `X-Request-ID` with a dynamic value taken from the `request_id` [context variable](/api-management/traffic-transformation/request-context-variables) +- `X-User-ID` with a dynamic value taken from the `uid` field in the [session metadata](/api-management/policies#what-is-a-session-metadata) + +It will also delete one header (if present) from each response: + +- `X-Secret` + + +#### Endpoint-level transform + + +The process of configuring a transformation of a response header for a specific endpoint in Tyk Operator is similar to that defined in section [endpoint-level transform](#tyk-classic-endpoint) for the Tyk Classic API definition. To configure a transformation of the response headers for a specific endpoint you must add a new `transform_response_headers` object to the `extended_paths` section of your API definition. + +In this example the Response Header Transform middleware (`transform_response_headers`) has been configured for HTTP `GET` requests to the `/xml` endpoint. Any response received from the upstream service following a request to that endpoint will have the `Content-Type` header added with a value set to `application/json`. 
+ +#### Example + +```yaml {linenos=true, linenostart=1, hl_lines=["54-60"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-transform +spec: + name: httpbin-transform + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-transform + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + transform: + - method: POST + path: /anything + template_data: + enable_session: false + input_type: json + template_mode: blob + # base64 encoded template + template_source: eyJiYXIiOiAie3suZm9vfX0ifQ== + transform_headers: + - delete_headers: + - "remove_this" + add_headers: + foo: bar + path: /anything + method: POST + transform_response: + - method: GET + path: /xml + template_data: + enable_session: false + input_type: xml + template_mode: blob + # base64 encoded template + template_source: e3sgLiB8IGpzb25NYXJzaGFsIH19 + transform_response_headers: + - method: GET + path: /xml + add_headers: + Content-Type: "application/json" + act_on: false + delete_headers: [] +``` + +#### Tyk Gateway < 5.3.0 Example + +If using Tyk Gateway < v5.3.0 then a `response_processor` object must be added to the API definition containing a `header_injector` item, as highlighted below: + +```yaml {linenos=true, linenostart=1, hl_lines=["17", "19", "57-63"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-transform +spec: + name: httpbin-transform + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-transform + strip_listen_path: true + response_processors: + - name: response_body_transform + - name: header_injector + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + 
use_extended_paths: true + paths: + black_list: [] + ignored: [] + white_list: [] + extended_paths: + transform: + - method: POST + path: /anything + template_data: + enable_session: false + input_type: json + template_mode: blob + # base64 encoded template + template_source: eyJiYXIiOiAie3suZm9vfX0ifQ== + transform_headers: + - delete_headers: + - "remove_this" + add_headers: + foo: bar + path: /anything + method: POST + transform_response: + - method: GET + path: /xml + template_data: + enable_session: false + input_type: xml + template_mode: blob + # base64 encoded template + template_source: e3sgLiB8IGpzb25NYXJzaGFsIH19 + transform_response_headers: + - method: GET + path: /xml + add_headers: + Content-Type: "application/json" + act_on: false + delete_headers: [] +``` \ No newline at end of file diff --git a/api-management/traffic-transformation/virtual-endpoints.mdx b/api-management/traffic-transformation/virtual-endpoints.mdx new file mode 100644 index 000000000..94a8d2006 --- /dev/null +++ b/api-management/traffic-transformation/virtual-endpoints.mdx @@ -0,0 +1,756 @@ +--- +title: "Virtual Endpoints" +description: "How to configure Virtual Endpoints traffic transformation middleware in Tyk" +keywords: "Traffic Transformation, Virtual Endpoints" +sidebarTitle: "Virtual Endpoints" +--- + +## Overview + +Tyk's Virtual Endpoint is a programmable middleware component that is invoked towards the end of the request processing chain. It can be enabled at the per-endpoint level and can perform complex interactions with your upstream service(s) that cannot be handled by one of the other middleware components. + +Virtual endpoint middleware provides a serverless compute function that allows for the execution of custom logic directly within the gateway itself, without the need to proxy the request to an upstream service. 
This functionality is particularly useful for a variety of use cases, including request transformation, aggregation of responses from multiple services, or implementing custom authentication mechanisms. + +The Virtual Endpoint is an extremely powerful feature that is unique to Tyk and provides exceptional flexibility to your APIs. + +### Use Cases + +#### Aggregating data from multiple services + +From a virtual endpoint, you can make calls out to other internal and upstream APIs. You can then aggregate and process the responses, returning a single response object to the originating client. This allows you to configure a single externally facing API to simplify interaction with multiple internal services, leaving the heavy lifting to Tyk rather than starting up an aggregation service within your stack. + +#### Enforcing custom policies + +Tyk provides a very flexible [middleware chain](/api-management/traffic-transformation#request-middleware-chain) where you can combine functions to implement the access controls you require to protect your upstream services. Of course, not all scenarios can be covered by Tyk's standard middleware functions, but you can use a virtual endpoint to apply whatever custom logic you require to optimize your API experience. + +#### Dynamic Routing + +With a virtual endpoint you can implement complex dynamic routing of requests made to a single external endpoint on to different upstream services. The flexibility of the virtual endpoint gives access to data within the request (including the key session) and also the ability to make calls to other APIs to make decisions on the routing of the request. It can operate as a super-powered [URL rewrite](/transform-traffic/url-rewriting#url-rewrite-middleware) middleware. + +### Working + +The virtual endpoint middleware provides a JavaScript engine that runs the custom code that you provide either inline within the API definition or in a source code file accessible to the Gateway. 
The JavaScript Virtual Machine (JSVM) provided in the middleware is a traditional ECMAScript5 compatible environment which does not offer the more expressive power of something like Node.js. + +The virtual endpoint terminates the request, so the JavaScript function must provide the response to be passed to the client. When a request hits a virtual endpoint, the JSVM executes the JavaScript code which can modify the request, make calls to other APIs or upstream services, process data, and ultimately determines the response returned to the client. + + + +You will need to enable Tyk's JavaScript Virtual Machine by setting `enable_jsvm` to `true` in your `tyk.conf` [file](/tyk-oss-gateway/configuration#enable_jsvm) for your virtual endpoints to work. + + + +### Scripting virtual endpoint functions + +The [middleware scripting guide](/api-management/plugins/javascript#using-javascript-with-tyk) provides guidance on writing JS functions for your virtual endpoints, including how to access key session data and custom attributes from the API definition. + +#### Function naming + +The virtual endpoint middleware will invoke a named function within the JS code that you provide (either inline or in a file). Both the filename and function name are configurable per endpoint, but note that function names must be unique across your API portfolio because all plugins run in the same virtual machine. This means that you can share a single function definition across multiple endpoints and APIs but you cannot have two different functions with the same name (this applies across all [JavaScript middleware components](/api-management/plugins/javascript#)). + +Inline mode is mainly used by the dashboard to make code injection easier on multiple node deployments. + +### Virtual endpoint library + +We have put together a [library](https://github.com/TykTechnologies/custom-plugins#virtual-endpoints) of JS functions that you could use in your virtual endpoints. 
We welcome submissions from the Tyk community, so if you've created a function that you think would be useful to other users, please open an issue in the GitHub repository and we can discuss bringing it into the library. + + + +Virtual endpoints are not available in Tyk Cloud Classic. + + + +
 + +{/* proposed "summary box" to be shown graphically on each middleware page + # Virtual Endpoint middleware summary + - The Virtual Endpoint middleware is an optional stage in Tyk's API Request processing chain, sitting between the [TBC]() and [TBC]() middleware. + - The Virtual Endpoint middleware can be configured at the per-endpoint level within the API Definition and is supported by the API Designer within the Tyk Dashboard. */} + + +## Using Tyk OAS + + +The [virtual endpoint](/api-management/traffic-transformation/virtual-endpoints) middleware provides a serverless compute function that allows for the execution of custom logic directly within the gateway itself, without the need to proxy the request to an upstream service. This functionality is particularly useful for a variety of use cases, including request transformation, aggregation of responses from multiple services, or implementing custom authentication mechanisms. + +The middleware is configured in the [Tyk OAS API Definition](/api-management/gateway-config-tyk-oas#operation). You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the legacy Tyk Classic APIs, then check out the [Tyk Classic](#virtual-endpoints-using-classic) page. + +### API Definition + +The design of the Tyk OAS API Definition takes advantage of the `operationId` defined in the OpenAPI Document that declares both the path and method for which the middleware should be added. Endpoint `paths` entries (and the associated `operationId`) can contain wildcards in the form of any string bracketed by curly braces, for example `/status/{code}`. These wildcards exist purely to make the paths human readable; they do not translate to variable names. Under the hood, a wildcard translates to the "match everything" regex of: `(.*)`. 
 + +The virtual endpoint middleware (`virtualEndpoint`) can be added to the `operations` section of the Tyk OAS Extension (`x-tyk-api-gateway`) in your Tyk OAS API Definition for the appropriate `operationId` (as configured in the `paths` section of your OpenAPI Document). + +The `virtualEndpoint` object has the following configuration: + +- `enabled`: enable the middleware for the endpoint +- `functionName`: the name of the JavaScript function that will be executed when the virtual endpoint is triggered +- `body`: [optional] a `base64` encoded string containing the JavaScript code +- `path`: [optional] the relative path to the source file containing the JavaScript code +- `proxyOnError`: [optional, defaults to `false`] a boolean that determines the behavior of the gateway if an error occurs during the execution of the virtual endpoint's function; if set to `true` the request will be proxied to upstream if the function errors, if set to `false` the request will not be proxied and Tyk will return an error response +- `requireSession`: [optional, defaults to `false`] a boolean that indicates whether the virtual endpoint should have access to the session object; if `true` then the key session data will be provided to the function as the `session` variable + + + + + One of either `path` or `body` must be provided, depending on whether you are providing the JavaScript code in a file or inline within the API definition. If both are provided then `body` will take precedence. 
+ + + +For example: + +```json {hl_lines=["39-50", "54-58"],linenos=true, linenostart=1} +{ + "components": {}, + "info": { + "title": "example-virtual-endpoint", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-virtual-endpoint", + "state": { + "active": true, + "internal": false + } + }, + "upstream": { + "url": "http://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/example-virtual-endpoint/", + "strip": true + } + }, + "middleware": { + "global": { + "pluginConfig": { + "data": { + "enabled": true, + "value": { + "map": { + "key": 3 + }, + "num": 4, + "string": "example" + } + } + } + }, + "operations": { + "anythingget": { + "virtualEndpoint": { + "enabled": true, + "functionName": "myVirtualHandler", + "body": "ZnVuY3Rpb24gbXlWaXJ0dWFsSGFuZGxlciAocmVxdWVzdCwgc2Vzc2lvbiwgY29uZmlnKSB7ICAgICAgCiAgdmFyIHJlc3BvbnNlT2JqZWN0ID0gewogICAgQm9keTogIlZpcnR1YWwgRW5kcG9pbnQgIitjb25maWcuY29uZmlnX2RhdGEuc3RyaW5nLAogICAgSGVhZGVyczogewogICAgICAiZm9vLWhlYWRlciI6ICJiYXIiLAogICAgICAibWFwLWhlYWRlciI6IEpTT04uc3RyaW5naWZ5KGNvbmZpZy5jb25maWdfZGF0YS5tYXApLAogICAgICAic3RyaW5nLWhlYWRlciI6IGNvbmZpZy5jb25maWdfZGF0YS5zdHJpbmcsCiAgICAgICJudW0taGVhZGVyIjogSlNPTi5zdHJpbmdpZnkoY29uZmlnLmNvbmZpZ19kYXRhLm51bSkKICAgIH0sCiAgICBDb2RlOiAyMDAKICB9CiAgcmV0dXJuIFR5a0pzUmVzcG9uc2UocmVzcG9uc2VPYmplY3QsIHNlc3Npb24ubWV0YV9kYXRhKQp9" + } + } + } + } + } +} +``` + +In this example the virtual endpoint middleware has been configured for requests to the `GET /anything` endpoint. 
We have also configured the following custom attributes in the `pluginConfig` section of the API definition: + +```json +{ + "map": { + "key": 3 + }, + "num": 4, + "string": "example" +} +``` + +The `body` field value is a `base64` encoded string containing this JavaScript code, which will be invoked by the virtual endpoint middleware: + +```js +function myVirtualHandler (request, session, config) { + var responseObject = { + Body: "Virtual Endpoint "+config.config_data.string, + Headers: { + "foo-header": "bar", + "map-header": JSON.stringify(config.config_data.map), + "string-header": config.config_data.string, + "num-header": JSON.stringify(config.config_data.num) + }, + Code: 200 + } + return TykJsResponse(responseObject, session.meta_data) +} +``` + +A call to the `GET /anything` endpoint returns: + +```bash +HTTP/1.1 200 OK +Date: Fri, 01 Mar 2024 12:14:36 GMT +Foo-Header: bar +Map-Header: {"key":3} +Num-Header: 4 +Server: tyk +String-Header: example +Content-Length: 24 +Content-Type: text/plain; charset=utf-8 + +Virtual Endpoint example +``` + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the virtual endpoint middleware. + +### API Designer + +Adding a Virtual Endpoint to your API endpoints is easy when using the API Designer in the Tyk Dashboard, simply follow these steps: + +1. **Add an endpoint** + + From the **API Designer** add an endpoint that matches the path and method to which you want to apply the middleware. + + Tyk OAS API Designer showing no endpoints created + + Adding an endpoint to an API using the Tyk OAS API Designer + + Tyk OAS API Designer showing no middleware enabled on endpoint + +2. **Select the Virtual Endpoint middleware** + + Select **ADD MIDDLEWARE** and choose **Virtual Endpoint** from the *Add Middleware* screen. + + Adding the Virtual Endpoint middleware + +3. 
**Configure the middleware** + + Now you can provide either the path to a file containing the JavaScript function to be run by the middleware, or you can directly enter the JavaScript in the code editor. + + For both sources, you must provide the **function name** that should be called when the middleware executes. + + You can also optionally configure the behavior required if the function should return an error and also indicate to Tyk whether the virtual middleware requires access to the key session metadata. + + Configuring the Virtual Endpoint middleware + +4. **Save the API** + + Select **ADD MIDDLEWARE** to save the middleware configuration. Remember to select **SAVE API** to apply the changes. + +## Using Classic + + +The [virtual endpoint](/api-management/traffic-transformation/virtual-endpoints) middleware provides a serverless compute function that allows for the execution of custom logic directly within the gateway itself, without the need to proxy the request to an upstream service. This functionality is particularly useful for a variety of use cases, including request transformation, aggregation of responses from multiple services, or implementing custom authentication mechanisms. + +This middleware is configured in the Tyk Classic API Definition. You can do this via the Tyk Dashboard API or in the API Designer. + +If you're using the newer Tyk OAS APIs, then check out the [Tyk OAS](#virtual-endpoints-using-tyk-oas) page. + +If you're using Tyk Operator then check out the [configuring the middleware in Tyk Operator](#tyk-operator) section below. + +### API Definition + +If you want to use Virtual Endpoints, you must [enable Tyk's JavaScript Virtual Machine](/tyk-oss-gateway/configuration#enable_jsvm) by setting `enable_jsvm` to `true` in your `tyk.conf` file. + +To enable the middleware you must add a new `virtual` object to the `extended_paths` section of your API definition. 
 + +The `virtual` object has the following configuration: + +- `path`: the endpoint path +- `method`: the endpoint HTTP method +- `response_function_name`: this is the name of the JavaScript function that will be executed when the virtual endpoint is triggered +- `function_source_type`: instructs the middleware to look for the JavaScript code either in a `file` or in a base64 encoded `blob`; the actual file location (or base64 encoded code) is provided in `function_source_uri` +- `function_source_uri`: if `function_source_type` is set to `file`, this will be the relative path to the source file containing the JavaScript code; if `function_source_type` is set to `blob`, this will be a `base64` encoded string containing the JavaScript code +- `use_session`: a boolean that indicates whether the virtual endpoint should have access to the session object; if `true` then the key session data will be provided to the function as the `session` variable +- `proxy_on_error`: a boolean that determines the behavior of the gateway if an error occurs during the execution of the virtual endpoint's function; if set to `true` the request will be proxied to upstream if the function errors, if set to `false` the request will not be proxied and Tyk will return an error response + +For example: + +```json {linenos=true, linenostart=1} +{ + "extended_paths": { + "virtual": [ + { + "response_function_name": "myUniqueFunctionName", + "function_source_type": "blob", + "function_source_uri": "ZnVuY3Rpb24gbXlVbmlxdWVGdW5jdGlvbk5hbWUocmVxdWVzdCwgc2Vzc2lvbiwgY29uZmlnKSB7CiB2YXIgcmVzcG9uc2VPYmplY3QgPSB7IAogIEJvZHk6ICJUSElTIElTIEEgVklSVFVBTCBSRVNQT05TRSIsIAogIENvZGU6IDIwMCAKIH0KIHJldHVybiBUeWtKc1Jlc3BvbnNlKHJlc3BvbnNlT2JqZWN0LCBzZXNzaW9uLm1ldGFfZGF0YSkKfQ==", + "path": "/anything", + "method": "GET", + "use_session": false, + "proxy_on_error": false + } + ] + } +} +``` + +In this example the Virtual Endpoint middleware has been configured for requests to the `GET /anything` endpoint. 
For any call made to this endpoint, Tyk will invoke the function `myUniqueFunctionName` that is `base64` encoded in the `function_source_uri` field. This virtual endpoint does not require access to the session data and will not proxy the request on to the upstream if there is an error when processing the `myUniqueFunctionName` function. + +Decoding the value in `function_source_uri` we can see that the JavaScript code is: + +```js {linenos=true, linenostart=1} +function myUniqueFunctionName(request, session, config) { + var responseObject = { + Body: "THIS IS A VIRTUAL RESPONSE", + Code: 200 + } + return TykJsResponse(responseObject, session.meta_data) +} +``` + +This function will terminate the request without proxying it to the upstream returning `HTTP 200` as follows: + +```bash +HTTP/1.1 200 OK +Date: Wed, 28 Feb 2024 20:52:30 GMT +Server: tyk +Content-Type: text/plain; charset=utf-8 +Content-Length: 26 + +THIS IS A VIRTUAL RESPONSE +``` + +If, however, we introduce an error to the JavaScript, such that Tyk fails to process the function, we will receive an `HTTP 500 Internal Server Error` as follows: + +```bash +HTTP/1.1 500 Internal Server Error +Date: Wed, 28 Feb 2024 20:55:27 GMT +Server: tyk +Content-Type: application/json +Content-Length: 99 + +{ +"error": "Error during virtual endpoint execution. Contact Administrator for more details." +} +``` + +If we set `proxy_on_error` to `true` and keep the error in the Javascript, the request will be forwarded to the upstream and Tyk will return the response received from that service. + +### API Designer + +You can use the API Designer in the Tyk Dashboard to configure a virtual endpoint for your Tyk Classic API by following these steps. + +1. **Add an endpoint for the path and select the plugin** + + From the **Endpoint Designer** add an endpoint that matches the path for which you want to trigger the virtual endpoint. Select the **Virtual Endpoint** plugin. + + Selecting the middleware + +2. 
**Configure the middleware** + + Once you have selected the virtual endpoint middleware for the endpoint, you need to supply: + + - JS function to call + - Source type (`file` or `inline`) + + If you select source type `file` you must provide the path to the file: + Configuring file based JS code + + If you select `inline` you can enter the JavaScript code in the Code Editor window. + Configuring inline JS code + +3. **Save the API** + + Use the *save* or *create* buttons to save the changes and activate the Virtual Endpoint middleware. + + + + + The Tyk Classic API Designer does not provide options to configure `use_session` or `proxy_on_error`, but you can do this from the Raw Definition editor. + + + +### Tyk Operator + +The process for configuring a virtual endpoint using Tyk Operator is similar to that explained in configuring the middleware in the Tyk Classic API Definition + +The example API Definition below configures an API to listen on path `/httpbin` and forwards requests upstream to `http://httpbin.org`. The Virtual Endpoint middleware has been configured for requests to the `GET /virtual` endpoint. For any call made to this endpoint, Tyk will invoke the function `myVirtualHandler` that is base64 encoded in the `function_source_uri` field. This virtual endpoint does not require access to the session data and will not proxy the request on to the upstream if there is an error when processing the `myVirtualHandler` function. 
+ +```yaml {linenos=true, linenostart=1, hl_lines=["23-35"]} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: test-config-data-test +spec: + name: test-config-data-test + protocol: http + proxy: + listen_path: /httpbin/ + target_url: http://httpbin.org + strip_listen_path: true + active: true + use_keyless: true + enable_context_vars: true + version_data: + default_version: Default + not_versioned: false + versions: + Default: + name: Default + use_extended_paths: true + extended_paths: + virtual: + - function_source_type: blob + response_function_name: myVirtualHandler + function_source_uri: "ZnVuY3Rpb24gbXlWaXJ0dWFsSGFuZGxlciAocmVxdWVzdCwgc2Vzc2lvbiwgY29uZmlnKSB7ICAgICAgCiAgdmFyIHJlc3BvbnNlT2JqZWN0ID0gewogICAgQm9keTogIlRISVMgSVMgQSAgVklSVFVBTCBSRVNQT05TRSIsCiAgICBIZWFkZXJzOiB7CiAgICAgICJmb28taGVhZGVyIjogImJhciIsCiAgICAgICJtYXAtaGVhZGVyIjogSlNPTi5zdHJpbmdpZnkoY29uZmlnLmNvbmZpZ19kYXRhLm1hcCksCiAgICAgICJzdHJpbmctaGVhZGVyIjogY29uZmlnLmNvbmZpZ19kYXRhLnN0cmluZywKICAgICAgIm51bS1oZWFkZXIiOiBKU09OLnN0cmluZ2lmeShjb25maWcuY29uZmlnX2RhdGEubnVtKQogICAgfSwKICAgICAgQ29kZTogMjAwCiAgfQogIHJldHVybiBUeWtKc1Jlc3BvbnNlKHJlc3BvbnNlT2JqZWN0LCBzZXNzaW9uLm1ldGFfZGF0YSkKfQ==" + path: /virtual + method: GET + use_session: false + proxy_on_error: false + config_data: + string: "string" + map: + key: 3 + num: 4 +``` + +Decoding the value in `function_source_uri` we can see that the JavaScript code is: + +```javascript +function myVirtualHandler (request, session, config) { + var responseObject = { + Body: "THIS IS A VIRTUAL RESPONSE", + Headers: { + "foo-header": "bar", + "map-header": JSON.stringify(config.config_data.map), + "string-header": config.config_data.string, + "num-header": JSON.stringify(config.config_data.num) + }, + Code: 200 + } + return TykJsResponse(responseObject, session.meta_data) +} +``` + +This function will terminate the request without proxying it to the upstream, returning HTTP 200 as follows: + +```bash +HTTP/1.1 200 OK +Date: Wed, 14 Aug 
2024 15:37:46 GMT +Foo-Header: bar +Map-Header: {"key":3} +Num-Header: 4 +Server: tyk +String-Header: string +Content-Length: 27 +Content-Type: text/plain; charset=utf-8 + +THIS IS A VIRTUAL RESPONSE +``` + +If, however, we introduce an error to the JavaScript, such that Tyk fails to process the function, we will receive an HTTP 500 Internal Server Error as follows: + +```bash +HTTP/1.1 500 Internal Server Error +Date: Wed, 14 Aug 2024 15:37:46 GMT +Server: tyk +Content-Type: application/json +Content-Length: 99 + +{ +"error": "Error during virtual endpoint execution. Contact Administrator for more details." +} +``` + +If we set `proxy_on_error` to `true` and keep the error in the Javascript, the request will be forwarded to the upstream and Tyk will return the response received from that service. + +## Examples + +### Accessing Tyk data objects + +In this example, we demonstrate how you can access different [external Tyk objects](/api-management/plugins/javascript#accessing-external-and-dynamic-data) (API request, session key, API definition). + +1. Enable the Virtual Endpoint middleware on an endpoint of your API and paste this JavaScript into the API Designer (or save in a file and reference it from the middleware config): + +```javascript +function myFirstVirtualHandler (request, session, config) { + log("Virtual Test running") + + log("Request Body: " + request.Body) + log("Session: " + JSON.stringify(session.allowance)) + log("Config: " + JSON.stringify(config.APIID)) + log("param-1: " + request.Params["param1"]) // case sensitive + log("auth Header: " + request.Headers["Authorization"]) // case sensitive + + var responseObject = { + Body: "VIRTUAL ENDPOINT EXAMPLE #1", + Headers: { + "x-test": "virtual-header", + "x-test-2": "virtual-header-2" + }, + Code: 200 + } + + return TykJsResponse(responseObject, session.meta_data) +} +log("Virtual Test initialised") +``` + +2. 
Make a call to your API endpoint passing a request body, a value in the `Authorization` header and a query parameter `param1`. + +3. The virtual endpoint will terminate the request and return this response: + +```bash +HTTP/1.1 200 OK +Date: Thu, 29 Feb 2024 17:39:00 GMT +Server: tyk +X-Test: virtual-header +X-Test-2: virtual-header-2 +Content-Length: 27 +Content-Type: text/plain; charset=utf-8 + +VIRTUAL ENDPOINT EXAMPLE #1 +``` + +4. The gateway logs will include: + +```text +time="" level=info msg="Virtual Test running" prefix=jsvm type=log-msg +time="" level=info msg="Request Body: " prefix=jsvm type=log-msg +time="" level=info msg="Session: " prefix=jsvm type=log-msg +time="" level=info msg="Config: " prefix=jsvm type=log-msg +time="" level=info msg="param-1: " prefix=jsvm type=log-msg +time="" level=info msg="auth Header: " prefix=jsvm type=log-msg +``` + +### Accessing custom attributes in the API Definition + +You can add [custom attributes](/api-management/plugins/javascript#adding-custom-attributes-to-the-api-definition) to the API definition and access these from within your Virtual Endpoint. + +1. Add the following custom attributes to your API definition: + +```json +{ + "string": "string", + "map": { + " key": 3 + }, + "num": 4 +} +``` + +2. Enable the Virtual Endpoint middleware on an endpoint of your API and paste this JavaScript into the API Designer (or save in a file and reference it from the middleware config): + +```js +function mySecondVirtualHandler (request, session, config) { + var responseObject = { + Body: "VIRTUAL ENDPOINT EXAMPLE #2", + Headers: { + "foo-header": "bar", + "map-header": JSON.stringify(config.config_data.map), + "string-header": config.config_data.string, + "num-header": JSON.stringify(config.config_data.num) + }, + Code: 200 + } + return TykJsResponse(responseObject, session.meta_data) +} +``` + +3. Make a call to your API endpoint. + +4. 
The virtual endpoint will terminate the request and return this response: + +```bash +HTTP/1.1 200 OK +Date: Thu, 29 Feb 2024 17:29:12 GMT +Foo-Header: bar +Map-Header: {" key":3} +Num-Header: 4 +Server: tyk +String-Header: string +Content-Length: 26 +Content-Type: text/plain; charset=utf-8 + +VIRTUAL ENDPOINT EXAMPLE #2 +``` + +### Advanced example + +In this example, every line in the script gives an example of a functionality usage, including: + +- how to get form param +- how to get to a specific key inside a JSON variable +- the structure of the request object +- using `TykMakeHttpRequest` to make an HTTP request from within the virtual endpoint, and the json it returns - `.Code` and `.Body`. + +```js +function myVirtualHandlerGetHeaders (request, session, config) { + rawlog("Virtual Test running") + + //Usage examples: + log("Request Session: " + JSON.stringify(session)) + log("API Config:" + JSON.stringify(config)) + + log("Request object: " + JSON.stringify(request)) + log("Request Body: " + JSON.stringify(request.Body)) + log("Request Headers:" + JSON.stringify(request.Headers)) + log("param-1:" + request.Params["param1"]) + + log("Request header type:" + typeof JSON.stringify(request.Headers)) + log("Request header:" + JSON.stringify(request.Headers.Location)) + + + //Make api call to upstream target + newRequest = { + "Method": "GET", + "Body": "", + "Headers": {"location":JSON.stringify(request.Headers.Location)}, + "Domain": "http://httpbin.org", + "Resource": "/headers", + "FormData": {} + }; + rawlog("--- before get to upstream ---") + response = TykMakeHttpRequest(JSON.stringify(newRequest)); + rawlog("--- After get to upstream ---") + log("response type: " + typeof response); + log("response: " + response); + usableResponse = JSON.parse(response); + var bodyObject = JSON.parse(usableResponse.Body); + + var responseObject = { + //Body: "THIS IS A VIRTUAL RESPONSE", + Body: "yo yo", + Headers: { + "test": "virtual", + "test-2": "virtual", + 
"location" : bodyObject.headers.Location + }, + Code: usableResponse.Code + } + + rawlog("Virtual Test ended") + return TykJsResponse(responseObject, session.meta_data) +} +``` + +### Running the Advanced example + +You can find a Tyk Classic API definition [here](https://gist.github.com/letzya/5b5edb3f9f59ab8e0c3c614219c40747) that includes the advanced example, with the JS encoded `inline` within the middleware config for the `GET /headers` endpoint. + +Create a new Tyk Classic API using that API definition and then run the following command to send a request to the API endpoint: + +```bash +curl http://tyk-gateway:8080/testvirtualendpoint2/headers -H "location: /get" -v +``` + +This should return the following: + +```bash +Trying 127.0.0.1... +TCP_NODELAY set +Connected to tyk-gateway (127.0.0.1) port 8080 (#0) +GET /testvirtualendpoint2/headers HTTP/1.1 +Host: tyk-gateway:8080 +User-Agent: curl/7.54.0 +Accept: */* +location: /get + +HTTP/1.1 200 OK +Date: Fri, 08 Jun 2018 21:53:57 GMT +**Location: /get** +Server: tyk +Test: virtual +Test-2: virtual +Content-Length: 5 +Content-Type: text/plain; charset=utf-8 + +Connection #0 to host tyk-gateway left intact +yo yo +``` + +### Checking the Tyk Gateway Logs + +The `log` and `rawlog` commands in the JS function write to the Tyk Gateway logs. 
If you check the logs you should see the following: + +```text +[Jun 13 14:45:21] DEBUG jsvm: Running: myVirtualHandlerGetHeaders +Virtual Test running +[Jun 13 14:45:21] INFO jsvm-logmsg: Request Session: {"access_rights":null,"alias":"","allowance":0,"apply_policies":null,"apply_policy_id":"","basic_auth_data":{"hash_type":"","password":""},"certificate":"","data_expires":0,"enable_detail_recording":false,"expires":0,"hmac_enabled":false,"hmac_string":"","id_extractor_deadline":0,"is_inactive":false,"jwt_data":{"secret":""},"last_check":0,"last_updated":"","meta_data":null,"monitor":{"trigger_limits":null},"oauth_client_id":"","oauth_keys":null,"org_id":"","per":0,"quota_max":0,"quota_remaining":0,"quota_renewal_rate":0,"quota_renews":0,"rate":0,"session_lifetime":0,"tags":null} type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: API Config:{"APIID":"57d72796c5de45e649f22da390d7df43","OrgID":"5afad3a0de0dc60001ffdd07","config_data":{"bar":{"y":3},"foo":4}} type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request object: {"Body":"","Headers":{"Accept":["*/*"],"Location":["/get"],"User-Agent":["curl/7.54.0"]},"Params":{"param1":["I-am-param-1"]},"URL":"/testvirtualendpoint2/headers"} type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request Body: "" type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request Headers:{"Accept":["*/*"],"Location":["/get"],"User-Agent":["curl/7.54.0"]} type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: param-1:I-am-param-1 type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request header type:[object Object] type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request header: ["/get"] type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request location type: object type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request location type: string type=log-msg +[Jun 13 14:45:21] INFO jsvm-logmsg: Request location: /get type=log-msg +--- before get to upstream --- +--- After get to upstream --- +[Jun 13 14:45:22] INFO jsvm-logmsg: response type: 
string type=log-msg +[Jun 13 14:45:22] INFO jsvm-logmsg: response: {"Code":200,"Body":"{\"headers\":{\"Accept-Encoding\":\"gzip\",\"Connection\":\"close\",\"Host\":\"httpbin.org\",\"Location\":\"/get\",\"User-Agent\":\"Go-http-client/1.1\"}}\n","Headers":{"Access-Control-Allow-Credentials":["true"],"Access-Control-Allow-Origin":["*"],"Content-Length":["133"],"Content-Type":["application/json"],"Date":["Wed, 13 Jun 2018 13:45:21 GMT"],"Server":["gunicorn/19.8.1"],"Via":["1.1 vegur"]},"code":200,"body":"{\"headers\":{\"Accept-Encoding\":\"gzip\",\"Connection\":\"close\",\"Host\":\"httpbin.org\",\"Location\":\"/get\",\"User-Agent\":\"Go-http-client/1.1\"}}\n","headers":{"Access-Control-Allow-Credentials":["true"],"Access-Control-Allow-Origin":["*"],"Content-Length":["133"],"Content-Type":["application/json"],"Date":["Wed, 13 Jun 2018 13:45:21 GMT"],"Server":["gunicorn/19.8.1"],"Via":["1.1 vegur"]}} type=log-msg +Virtual Test ended +[Jun 13 14:45:22] DEBUG JSVM Virtual Endpoint execution took: (ns) 191031553 +``` + +### Aggregating upstream calls using batch processing + +One of the most common use cases for virtual endpoints is to provide some form of aggregate data to your users, combining the responses from multiple upstream service calls. 
This virtual endpoint function will do just that using the batch processing function from the [JavaScript API](/api-management/plugins/javascript#javascript-api) + +```js +function batchTest(request, session, config) { + // Set up a response object + var response = { + Body: "", + Headers: { + "test": "virtual-header-1", + "test-2": "virtual-header-2", + "content-type": "application/json" + }, + Code: 200 + } + + // Batch request + var batch = { + "requests": [ + { + "method": "GET", + "headers": { + "x-tyk-test": "1", + "x-tyk-version": "1.2", + "authorization": "1dbc83b9c431649d7698faa9797e2900f" + }, + "body": "", + "relative_url": "http://httpbin.org/get" + }, + { + "method": "GET", + "headers": {}, + "body": "", + "relative_url": "http://httpbin.org/user-agent" + } + ], + "suppress_parallel_execution": false + } + + log("[Virtual Test] Making Upstream Batch Request") + var newBody = TykBatchRequest(JSON.stringify(batch)) + + // We know that the requests return JSON in their body, lets flatten it + var asJS = JSON.parse(newBody) + for (var i in asJS) { + asJS[i].body = JSON.parse(asJS[i].body) + } + + // We need to send a string object back to Tyk to embed in the response + response.Body = JSON.stringify(asJS) + + return TykJsResponse(response, session.meta_data) + +} +log("Batch Test initialised") +``` + diff --git a/api-management/troubleshooting-debugging.mdx b/api-management/troubleshooting-debugging.mdx new file mode 100644 index 000000000..8ae9f22fe --- /dev/null +++ b/api-management/troubleshooting-debugging.mdx @@ -0,0 +1,1618 @@ +--- +title: "Troubleshooting and Debugging" +description: "Tyk troubleshooting and debugging gateway, streams, pump, dashboard" +keywords: "troubleshooting, debugging, Open Source, Self-Managed, Tyk Cloud, API Gateway" +sidebarTitle: "Troubleshooting" +--- + +## Gateway + +1. ##### Users receive 499 error in the Gateway + + **Cause** + + The Gateway receives closed client responses from the upstream client. 
There are a number of different configuration settings that could bring about this issue. + + **Solution** + + For a standard web app, used by standard HTTP clients, 499 errors are not a problem. + + + However, in some specific cases, depending on the service you provide, your clients can have their own fixed constraints. + For example, if you are building an API used by IoT devices, and those devices internally have a strict 2 second timeout for HTTP calls, while your service responds in more than 2 seconds. In this case a lot of 499 errors may mean that a lot of clients are malfunctioning, and you should investigate this behavior. + + On the other hand, sometimes a client closing the connection before reading the server response is expected functionality. Taking the same example as above, you may have some IoT sensor, which just pushes data to your servers in "fire and forget" mode, and does not care about the server response. In this case a 499 error is completely expected behavior. + + +2. ##### Users receive 502 error in the Gateway + + **Cause** + + The Gateway received an invalid response from the upstream server. There are a number of different configuration settings that could bring about this issue. + + **Solution** + + Try using the following settings in your tyk.conf file: + + ```{.copyWrapper} + enable_detailed_recording: false, + enable_jsvm: false, + ``` + + + And the following key-value pairs should be set in the relevant API definition: + + ```{.copyWrapper} + proxy.service_discovery.use_nested_query = false + proxy.service_discovery.use_target_list = true + proxy.service_discovery.endpoint_returns_list = true + proxy.service_discovery.data_path = "Address" + proxy.service_discovery.port_data_path = "ServicePort" + ``` + + See [Tyk Gateway configuration](/tyk-oss-gateway/configuration) and [Tyk Gateway API](/api-management/gateway-config-tyk-classic) for further information regarding API definition settings. + +3. 
##### Gateway proxy error "context canceled"
+
+    In some cases you can see a "proxy error: context canceled" error message in the Gateway logs.
+    The error itself means that the connection was closed unexpectedly.
+    It can happen for various reasons, and in some cases it is totally fine: for example, a client can have an unstable mobile internet connection.
+
+    When it happens under high load, there can be a lot of different reasons.
+    For example, your OS may be running out of system limits, like the number of open sockets, and to validate this, you need to check your system limits.
+    See [this guide](/planning-for-production).
+
+    Additionally, it can be a CPU bottleneck: you can't process more than your machine can do.
+    And note that it is not only about the actual utilization %, it is also about the context switches it has to do.
+    E.g. having one job which consumes 100% of your CPU/cores vs having a few thousand jobs, causing the CPU to constantly switch between them.
+    Such problems cause internal request processing queues, which cause latency growth (we highly recommend measuring it).
+    And in some cases latency can grow so big that some clients can just disconnect or time out because of it.
+
+    Additionally, we highly recommend reading the following blog post: https://tyk.io/blog/performance-tuning-your-tyk-api-gateway/.
+    For example, you can trade memory for performance, and reduce context switching, by tuning the garbage collector to run less frequently: see the `Tuning Tyk's Garbage Collector` section.
+
+
+    Also note that it is not Tyk or Golang specific.
+    The problem described above will happen with any webserver at high scale.
+    So in general, if you see a lot of "context" errors under high load, use it as a sign that the process is really struggling with the given load, and you need to scale it up, either vertically or horizontally.
+
+4. 
##### Invalid memory address or nil pointer dereference error
+
+    **Cause**
+
+    There are a number of reasons, most commonly, an API may have been configured incorrectly in some way (for instance, it may have been set up without an organization). The error itself is specific to the Go language, in which Tyk was written, and could also suggest that alterations made to the code by the user could be the culprit.
+
+    **Solution**
+
+    Make sure that API definitions are set up correctly. Information on how to do this with the Tyk Gateway API can be found in the following links:
+
+    * [API Definition Object Details](/api-management/gateway-config-tyk-classic)
+    * [API Management](/tyk-gateway-api)
+
+5. ##### Users receive this error message when attempting to make API calls to an existing key.
+
+    **Cause**
+    When the token was created, most probably it was configured without the `meta_data` key.
+
+    **Solution**
+    The user will need to add the key-value pair `meta_data: {}` to their key as per the [Tyk Gateway REST API Documentation](/tyk-gateway-api).
+
+6. ##### There was a problem proxying the request
+
+    **Cause**
+
+    The upstream server may have returned an empty response or cut the response off early so it was unable to complete the proxying process. A proxy error means actual connectivity issues between Tyk and the target host (i.e., a connection-level issue with the downstream server misbehaving for some reason).
+
+    Expired TLS certificates may also cause issues.
+
+    **Solution**
+
+    Users are advised to upgrade to the latest versions of any Tyk packages at their earliest convenience as a patch was released to resolve this issue. Packages are available to download from [Packagecloud.io][1]. See [Upgrading Tyk](/developer-support/upgrading) for details on upgrading to the latest version. It may also be worth checking if any TLS certificates associated with the domain have expired.
+
+    [1]: https://packagecloud.io/tyk
+
+7. 
##### Tyk Gateway Profiling + + In some cases, to identify tricky issues like concurrency or memory related issues, it may be required to get information about the Gateway process runtime. For example, memory or CPU usage details. + The Tyk Gateway is built using Go, and inherits its powerful profiling tools, specifically Google's [`pprof`](https://github.com/google/pprof/). + + The Tyk Gateway can generate various profiles in the `pprof` supported format, which you can analyze by yourself, using the `go tool pprof` command, or you can send the profiles to our support team for analysis. + + There are two way to get profiles: + + 1. Running the process with flags mentioned below which will gather information about the running process for the first 30 seconds, and will generate files containing profiling info: + + * `--memprofile` - memory profile, generates `tyk.mprof` file + * `--cpuprofile` - CPU usage profile, generates `tyk.prof` file + * `--blockprofile` - Blocking profile, generates `tyk.blockprof` file + * `--mutexprofile` - Mutex profile, generates `tyk.mutexprof` file + + 2. Running with the `--httpprofile` flag, or set `enable_http_profiler` to `true` in tyk.conf, which will run a special `/debug/pprof/` public web page, containing dynamic information about the running process, and where you can download various profiles: + + * goroutine - stack traces of all current goroutines + * heap - a sampling of all heap allocations + * threadcreate - stack traces that led to the creation of new OS threads + * block - stack traces that led to blocking on synchronisation primitives + * mutex - stack traces of holders of contended mutexes + +##### Support Information + + + When contacting support, you may be asked to supply extra information and supply log files, etc, so we can quickly handle your request. 
Questions may include: + + * "Can you send us your log files" + * "Can you change the logging detail level" + * "What version of Tyk are you on" + * "What profiling information can I get" + + + This page will let you know how to get the above info to us. + + **Log Files** + + **Where do I find my log files?** + + The Gateway will log its output to `stderr` and `stdout`. In a typical installation, these will be handled or redirected by the service manager running the process, and depending on the Linux distribution, will either be output to `/var/log/` or `/var/log/upstart`. + + Tyk will try to output structured logs, and so will include context data around request errors where possible. + + **How do I increase Logging Verbosity?** + + You can set the logging verbosity in two ways: + + 1. Via an Environment Variable to affect all Tyk components + 2. Just for the Gateway via your `tyk.conf` config file + + **Setting via Environment Variable** + + The environment variable is `TYK_LOGLEVEL`. + + By default, the setting is `info`. You also have the following options: + + * `debug` + * `warn` + * `error` + + You will be advised by support which setting to change the logging level to. + + **For the Gateway** + + You can set the logging level in your `tyk.conf` by adding the following: + + ```{.copyWrapper} + "log_level": "info", + ``` + + By default, the setting is `info`. You also have the following options: + + * `debug` + * `warn` + * `error` + + You will be advised by support which setting to change the logging level to. + + **Tyk Version** + + For support requests it is beneficial to provide more information about your Gateway build. These pinpoint the exact Gateway build that is in use. + + - Since Gateway version `5.0.8` or `5.2.3` you can inspect detailed build information by running `tyk version`. The information also includes the Go version it was built with, the operating system and architecture. 
+
+    - If you're running an older version than the above, `tyk --version` prints out the release version for your Gateway binary.
+
+    The binary is installed in `/opt/tyk-gateway/tyk` by default. If your binary is not available in your `PATH` environment, invoke it from there.
+
+    **Profile Information**
+
+    You can provide various profile information for us in [pprof format](https://github.com/google/pprof/). See [Gateway Profiling](#tyk-gateway-profiling) for more details.
+
+8. ##### API definition URL case sensitive
+
+    For security reasons Tyk lowercases the URL before performing any pattern matching.
+
+9. ##### Gateway detected 0 APIs
+
+    Tyk Gateway is not able to get API configs from the Tyk Portal.
+    If you configured your Gateway to be segmented, you would also need to assign tags and you must also tag the APIs in the API Designer to make sure that they load.
+
+    * In the Pro edition that is a connectivity or misconfiguration issue
+    * In the Community edition, since you are not using the Dashboard, we
+      assume that you use file-based APIs, so in this case it's because
+      API definition files are missing.
+
+10. ##### How to import existing keys into Tyk CE
+
+    You can use an API to import existing keys that were not created in Tyk into Tyk's Gateway.
+    This doc explains how to do that with the Gateway's APIs directly.
+
+    This example uses standard `authorization` header authentication, and assumes that the Gateway is located at `127.0.0.1:8080` and the Tyk secret is `352d20ee67be67f6340b4c0605b044b7` - update these as necessary to match your environment. 
+ + To import a key called `mycustomkey`, save the JSON contents as `token.json` (see example below), then run the following Curl command: + + The Example `token.json` file + + ```{.json} + { + "allowance": 1000, + "rate": 1000, + "per": 60, + "expires": -1, + "quota_max": -1, + "quota_renews": 1406121006, + "quota_remaining": 0, + "quota_renewal_rate": 60, + "access_rights": { + "3": { + "api_name": "Tyk Test API", + "api_id": "3" + } + }, + "org_id": "53ac07777cbb8c2d53000002", + "basic_auth_data": { + "password": "", + "hash_type": "" + }, + "hmac_enabled": false, + "hmac_string": "", + "is_inactive": false, + "apply_policy_id": "", + "apply_policies": [ + "59672779fa4387000129507d", + "53222349fa4387004324324e", + "543534s9fa4387004324324d" + ], + "monitor": { + "trigger_limits": [] + } + } + ``` + + The import of the key to Tyk: + + ``` + curl http://127.0.0.1:8080/tyk/keys/mycustomkey -H 'x-tyk-authorization: 352d20ee67be67f6340b4c0605b044b7' -H 'Content-Type: application/json' -d @token.json + ``` + + Test the key after the import: + + ``` + curl http://127.0.0.1:8080/quickstart/headers -H 'Authorization: mycustomkey' + ``` + + See also the Keys section of the [Tyk Gateway API documentation](/tyk-gateway-api). + + +10. ##### Redis persistence using containers + + Use case: Keep my data persistent at Docker container restart + + The Multi-Cloud Redis container is ephemeral, it isn't configured for persistence because it would very quickly get very large (Docker containers in general should really be considered as ephemeral). + + If using Redis with Multi-Cloud we strongly recommend using an external Redis database. + + There are no settings for Redis available via environment variable, you would need to mount a new `redis.conf` into the container to customize the configuration, but again, we don't recommend it. + +11. 
##### DRL not ready, skipping this notification
+
+    **Description**
+
+    You see the following `Log Warning:`
+
+    `DRL not ready, skipping this notification`
+
+
+    **Cause**
+
+    There can be a couple of reasons for seeing this error about the [Distributed Rate Limiter](/api-management/rate-limit#rate-limiting-layers):
+
+    1. When you have more than one installation of the Gateway with one configured to use DRL, and others not.
+    2. When the Gateway is started and the DRL receives an event before it has finished initialising.
+
+    **Solution**
+
+    For cause **1**, ensure that all instances of the Tyk Gateway are configured to use DRL.
+
+    For cause **2**, the error will disappear when the DRL has initialised.
+
+12. ##### "Index out of range" error in logs
+
+    **Description**
+
+    Redis cluster users receive the aforementioned error message in their logs. The log stack may resemble the following:
+
+    ```
+    2016/06/22 09:58:41 http: panic serving 10.0.0.1:37196: runtime error: index out of range
+    2016/06/22 09:58:41 http: panic serving 10.0.0.1:37898: runtime error: index out of range
+    2016/06/22 09:58:41 http: panic serving 10.0.0.1:38013: runtime error: index out of range
+    2016/06/22 09:58:42 http: panic serving 10.0.0.1:39753: runtime error: index out of range
+    2016/06/22 10:01:07 http: panic serving 10.0.0.1:34657: runtime error: invalid memory address or nil pointer dereference
+    2016/06/22 10:01:07 http: panic serving 10.0.0.1:36801: runtime error: invalid memory address or nil pointer dereference
+    ```
+
+    **Cause**
+
+    This is due to a bug that prevents the driver from picking up a random redis handle in single-instance connections such as pub/sub. The issue affects later patch releases of Tyk 2.2 and the first release of Tyk 2.3.
+
+    **Solution**
+
+    Users are advised to upgrade to the latest versions of any Tyk packages as a patch was released to resolve this issue. 
Packages are available to download from [Packagecloud.io](https://packagecloud.io/tyk) and further details on how to upgrade can be found [here](/developer-support/upgrading). + +13. ##### Hot restart a Tyk Gateway Process + + It is possible to hot-restart a Tyk Gateway process without dropping any connections. This can be useful if you need to load up a new configuration or change a configuration on a production server without losing any traffic. + + To hot-restart a Tyk Gateway process, you simply need to send a `SIGUSR2` signal to the process, for example: + + ```bash + > sudo kill -SIGUSR2 {gateway-pid} + ``` + + This will fork and load a new process, passing all open handles to the new server and wait to drain the old ones. + +14. ##### How to add Custom Certificates to Trusted Storage of Docker Images + + To add your custom Certificate Authority(CA) to your docker containers. You can mount your CA certificate directly into `/etc/ssl/certs` folder. + + Docker: + ```{.copyWrapper} + docker run -it tykio/tyk-gateway:latest \ + -v $(pwd)/myCA.pem:/etc/ssl/certs/myCA.pem + ``` + + Kubernetes - using Helm Chart and secrets: + ```yaml + extraVolumes: + - name: self-signed-ca + secret: + secretName: self-signed-ca-secret + extraVolumeMounts: + - name: self-signed-ca + mountPath: "/etc/ssl/certs/myCA.pem" + subPath: myCA.pem + ``` + +15. ##### How to change the logging output location + + It's not possible to segregate out the error locations in the `tyk.conf`, but you can modify the actual initialisation files to specify the log location, we supply initialisation scripts for `SysV`, `systemd` and `upstart`. + +16. ##### How to clear / invalidate API cache + + Use the REST API to clear the cache + + **OSS** + + ``` + DELETE /tyk/cache/{api-id} + ``` + + **Tyk Dashboard** + + ``` + DELETE /api/cache/{api-id} + ``` + +17. ##### How to find the Gateway logging output + + You are able to see a more detailed output in your Gateway log `/var/log` or `/var/log/upstart`. 
+ +## Gateway Error Response Status Codes + +Tyk Gateway responses include HTTP status codes that follow the [HTTP status code standard](https://datatracker.ietf.org/doc/html/rfc9110). They have three digits that describe the result of the request and the semantics of the response. +The first digit defines the class of response as shown in the [list](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes) below: +- 1xx (Informational): The request was received, continuing process +- 2xx (Successful): The request was successfully received, understood, and accepted +- 3xx (Redirection): Further action needs to be taken in order to complete the request +- 4xx (Client Error): The request contains bad syntax or cannot be fulfilled +- 5xx (Server Error): The server failed to fulfill an apparently valid request + +Here we provide a list of all the error status codes (4xx and 5xx) that may be returned by the Tyk Gateway along with their corresponding messages and some guidance on the likely cause of the error. +Tyk supports [error templating](/api-management/gateway-events#error-templates), allowing you to configure the Gateway to return customised messages for certain HTTP error codes. + +We also support limited customisation of the error codes and messages returned by custom authentication middleware through the use of [override messages](/tyk-oss-gateway/configuration#override_messages). + +| Code | Text | Recommended action | +| :--- | :-------------------------------------------------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 400 | Access to this API has been disallowed | Check if the key has access to the right API version or definition. Check if the authentication key used is still valid. Check if the certificate used for authentication is present. 
Check if the authentication key is created and present in the database. You can use Gateway Keys APIs for confirmation. Check if API definition is using JWT auth and if auth header key and or value is empty or missing.| +| 400 | API is not OAuth2 | Check if OAuth2 is integrated into the API by auth tokens or using Tyk OAuth flow. | +| 400 | Attempted access with malformed header | Values not in basic auth format or auth data not encoded correctly. | +| 400 | Authorization Field Missing | Check if the authorization field is missing. Check if the OAuth authorization field is missing. | +| 400 | Batch request creation failed, request structure malformed | Attempted to construct unsafe requests. Check if request structure is in correct format. | +| 400 | Batch request malformed | Attempted to decode request but failed. Check if request structure is in correct format. | +| 400 | Bearer token malformed | Check if the OAuth authorization field is malformed. | +| 400 | Body do not contain password or username | Check if body contains both password and username. If not, then insert the correct login credentials. | +| 400 | Cannot parse form. Form malformed | Attempted to revoke token but could not parse the request form. Check if the request form is malformed. | +| 400 | Content length is not a valid Integer | Check the value provided in the Content-Length field in the header. | +| 400 | Couldn’t decode instruction | Attempted to decode policy record from an update request. Check if the request body is malformed and is valid. | +| 400 | Couldn’t decode OAS object | Attempted to import OAS Tyk API but failed to retrieve object from request. Check if request body is valid. | +| 400 | Error API not migrated | The supplied API definition is in OAS format. Please use the Tyk native format for this API. | +| 400 | Failed to create key, keys must have at least one Access Rights record set | Attempted to create a key with master keys disabled in configurations. 
| +| 400 | Failed to remove the key | Failed to delete requested key. Make sure orgID and keyname are correct. | +| 400 | Health checks are not enabled for this node | Enable health checks for the gateway. | +| 400 | Key not authorized | Check if OAuth key is present. Check if the OAuth client is not deleted. Check if there is a valid policy associated with the key/token used. Check if the policy associated with the key is not expired or if the owner is valid. Check if JWT default policies exist. | +| 400 | Key cannot be used without a certificate | Check if key contains a certificate. If not, add a certificate to the key. | +| 400 | Key must be used with an existent certificate | Check if the certificate on the key exist within the system. | +| 400 | Missing parameter api_id | Check if API_ID is missing. If so, fill in the api_ID field with the correct value. | +| 400 | OAuth client doesn’t exist | Check if API_ID is missing. If so, fill in the api_ID field with the correct value. | +| 400 | OAuth client ID is empty | Check if OAuth client ID field is empty. If so, fill in with the correct client ID value. | +| 400 | OAuth is not enabled for this API | Check if OAuth is enabled for the API. | +| 400 | Policy access rights doesn’t contain API this OAuth client belongs to | Check if the policy rights contains the proper api_ID for the API. | +| 400 | Request apiID does not match that in Definition! For Update operations these must match | Attempted a PUT operation using different api_ID's. Make sure the api_ID's are the same. | +| 400 | Request field is missing | Check if the request field is missing. If so, fill in the request field. | +| 400 | Request ID does not match that in policy! For Update operations these must match | Attempted a PUT operation using different policy ID's. Make sure both policy ID's are the same. | +| 400 | Request is too large | The request body exceeds the configured size limit for the API endpoint. 
| +| 400 | Request with empty authorization header | Fill in authorization header for the request. | +| 400 | Spec field is missing | Attempted to trace a request but spec field is missing. Fill in the spec field. | +| 400 | The provided request is empty | Check if request in the GraphQL playground is correct. | +| 401 | Authorization Field Missing | Check if the authorization field is missing. Check if the OAuth authorization field is missing. | +| 401 | Header missing | Check if header field exist when making request. | +| 401 | Key has expired, please renew | Current key has expired. Please request for a new key. | +| 401 | OAuth Client Id Empty | Fill in the Client ID field. | +| 401 | OAuth Client Secret Empty | Client secret is empty. Insert the required client secret. | +| 401 | Request signature verification failed | Possible empty signature header or validation failed. | +| 401 | Wrong Password | Enter the correct password. Contact an administrator if further help is needed. | +| 403 | Access to this API has been disallowed | Request access to the API from an administrator. | +| 403 | Access to this resource has been disallowed | Request access to the resource from an administrator. | +| 403 | Attempted access with non-existent cert | Check if authentication certificate exist. | +| 403 | Attempted administrative access with invalid or missing key! | Check if there is correct security credentials of the Tyk API. | +| 403 | Certificate with SHA256 $certID not allowed | Certificate ID is nil or invalid. Please have a valid certificate. | +| 403 | Client authorize request in with invalid redirect URI | Check if Auth Redirect URI is malformed or use a valid redirect URI. | +| 403 | Client TLS certificate is required | Check if theres multiple APIs on the same domain with no certificates. | +| 403 | Certificate has expired | Please update the certificate with one that is currently valid and has not expired. 
| +| 403 | Depth limit exceeded | Exceeded the depth limit that has been applied. Check the key/policy global limits and quota section or the API limits and quota section. | +| 403 | Empty Signature Header | Fill in a signature for auth keys. | +| 403 | Empty Signature Path | Check if path for signature is empty. | +| 403 | Failed with 403 after $x-amount of requests over quota | Process request off thread with quota or process request live with rate limit or process request off thread with rate limit. | +| 403 | Found an empty user ID in predefined base field claim user_id | Request with valid JWT/RSA or signature/empty user_id/sub claim, or signature/no base field or no sub or no id claim. | +| 403 | GraphQL Depth Limit Exceeded | Exceeded the depth limit that has been applied. Check the key/policy global limits and quota section or the API limits and quota section. | +| 403 | Invalid Token | Check if JWT token is valid and not malformed. | +| 403 | Invalid Signature Header | Insert correct signature header value. | +| 403 | Invalid Signature Path | Make sure signature path is correct and valid. | +| 403 | Key is not active, please renew | Create a new key. | +| 403 | Key not authorised: Unexpected signing method | Invalid JWT signature, JWT access with non-existent key. | +| 403 | Key not authorised: OAuth client access was revoked | Check if OAuth client exists. | +| 403 | Key not authorised: no matching policy | Request with invalid policy in JWT, or checking session and identity for valid key for openID. | +| 403 | No matching policy found in scope claim | Check if scope is wrong for JWT request. | +| 403 | Quota Exceeded | Quota limit has been exceeded. Check quota limit settings. | +| 403 | Run Go-plugin auth failed | Used an invalid token for authentication. Please use a valid token to authenticate. | +| 403 | This API version does not seem to exist | Attempted to extract version data from a request. Version does not exist when loading version data. 
| +| 403 | This organisation access has been disabled, please contact your API administrator | Organisation session is inactive. Contact API administrator. | +| 403 | This organisation quota has been exceeded, please contact your API administrator | Organisation's quota limit has been exceeded. Contact API administrator. | +| 403 | This organisation rate limit has been exceeded, please contact your API administrator | Organisation's rate limit has been exceeded. Contact API administrator. | +| 403 | TLS: bad certificate | Check if the certificates exist and have valid ID's. | +| 403 | Version Information not found | Checking version data from request. No default version has been set or found. | +| 404 | API doesn’t exist | Checking if API exists when rotating OauthClient or if ApiSpec value is nil. | +| 404 | API for this refresh token not found | When invalidating OAuth refresh or if ApiSpec value is nil. | +| 404 | API ID not found | Check if API ID exists in the Gateway. | +| 404 | API not found | Check if API exists. | +| 404 | Bundle not found | No bundles found within the Gateway. | +| 404 | Certificate with given SHA256 fingerprint not found | No certificates exist in the certificate manager list. | +| 404 | Couldn't find organisation session in active API list | Attempted to update session object. However, spec for organisation is nil. Make sure to have the correct organisation ID. | +| 404 | Error getting oauth client | See if OAuth client id exists in the system. | +| 404 | Key not found | Failed to update hashed key. | +| 404 | No such organisation found in Active API list | Make sure organisation ID is correct. | +| 404 | OAuth client doesn’t exist | Attempted to retrieve APIs for OAuth or client ID. Client ID was not found | +| 404 | OAuth client ID not found | Check if OAuth client ID exist in storage. Check if OAuth tokens or client details are valid. Failed to retrieve OAuth client list. Failed to revoke OAuth client list. 
| +| 404 | Org not found | Could not retrieve record of org ID or failed to delete org keys. Spec for org is nil, make sure orgID value is correct | +| 404 | Policy not found | Could not retrieve policy data. Make sure policy ID is correct. | +| 404 | There is no such key found | Check if key is already deleted. Check if hashed key has been deleted already. | +| 404 | Version Does Not Exist | Check if version path is filled and correct. | +| 405 | Malformed request body | Attempted a POST request with a malformed request body. Make sure the request body is valid. | +| 405 | Method not supported | Attempting to add a method that is not supported by our system. | +| 411 | Content length is required for this request | You need to provide the `Content-Length` field in the request header. | +| 429 | API Rate Limit Exceeded | Check the rate of the requests on the API level. Check the rate of requests on the API key (Auth token, certs, etc). | +| 499 | Client closed request | Check if the client closed the TCP connection | +| 500 | Cache invalidation failed | Attempted to scan or delete the cache, which failed, causing cache invalidation to fail. | +| 500 | Can't detect loop target | Verify target API exsists. Check if URL scheme is "tyk://". Refer to 404 errors | +| 500 | Could not write key data | Failed to update hashed key. Make sure key name is valid. | +| 500 | Delete failed | Attempted to delete policy with invalid filename. Attempted to delete API with invalid filename. Attempted to delete OAuth Client with incorrect OAuth client ID. | +| 500 | Due to enabled service policy source, please use the Dashboard API | Attempted to add/update a policy and rejected due to Policysource=service. Please use the Dashboard API. | +| 500 | Due to enabled use_dp_app_configs, please use Dashboard API | When trying to import OAS, when Dashboard config is set to true. Please use Dashboard API. | +| 500 | Error writing to key store | Attempted to update session with a new session. 
Make sure orgID is correct. | +| 500 | Failed to create file | When add/update policy, failed to create a file. Make sure the policy file path is correct | +| 500 | Failed to create key | Check if key already exist or if the key exists with a given certificate. Ensure security settings are correct | +| 500 | Failure in storing client data | Attempted to store data when creating a new OAuth client but failed. Make sure the storageID, or orgID is correct and valid. | +| 500 | Get client tokens failed | Failed to retrieve OAuth tokens. Make sure client ID is valid or keyName is valid. | +| 500 | Marshalling failed | Attempted to import printDef but failed. Marshalling of policy failed. Unmarshal object into the file failed when writing to file. | +| 500 | There was a problem proxying the request | Check if the target URL is unavailable to the Gateway. | +| 500 | Unmarshalling failed | Key creation failed. Failed to create OAuth client. Failed to update OAuth client. | +| 500 | Unsupported schema, unable to validate | Check if GraphQL schema is valid. | +| 500 | Upstreaming host lookup failed | Check if the target URL is not resolvable in DNS. | +| 503 | Service temporarily unavailable | Check if a circuit breaker middleware is enforced. | +| 503 | All hosts are down | Attempted to reverse proxy a URL rewrite to a scheme and host, but all the hosts in hostlist are down. | +| 504 | Upstream service reached hard timeout | Timeout awaiting response headers during a request round trip. | +| 507 | Status Insufficient Storage | Attempted to update an API through a POST request but failed to due insufficient storage. | + + +## Dashboard + +1. ##### Can't update policy. Please ensure at least one access rights setting is set + + **Description** + + Users receive this error when attempting to create a new Policy on the Dashboard. + + **Cause** + + The Access Rights field is a required setting for a policy. 
+ + **Solution** + + Users should first [create a new API](/api-management/gateway-config-managing-classic#create-an-api) and then [create a new policy](/api-management/gateway-config-managing-classic#secure-an-api) with an existing API in the Access Rights. + +2. ##### Dashboard not showing any analytics data + + **Description** + + The user is unable to see analytics data from a particular time period in the Dashboard + + **Cause** + + Missing analytics data could be caused by a number of different reasons: + + * Gateway incorrectly configured + * Pump incorrectly configured + * Pump service not running + * Dashboard incorrectly configured + * MDCB incorrectly configured + * Browser caching stale data + + **Solution** + + **Gateway incorrectly configured** + + Ensure the Gateway `tyk.conf` has: + + * `enable_analytics` set to `true`. This sets the Gateway to record analytics data. + * `analytics_config.storage_expiration_time` set to a value larger than the Pump's `purge_delay`. This allows the analytics data to exist long enough in Redis to be processed by the Pump. + * `analytics_config.ignored_ips` set to `[]`. This ensures the Gateway will create analytics for requests from any IP address. + * `enforce_org_data_age` set to `false`. This prevents the data from being removed based on it reaching a certain age. + + **Pump incorrectly configured** + + Ensure the Pump `pump.conf` has: + + * `analytics_storage_type` set to `redis`. + * `analytics_storage_config` settings are set to the same Redis instance that the Gateway is connected to. + + **Pump service not running** + + Ensure the Pump service is running. + + **Dashboard incorrectly configured** + + Ensure the Dashboard `tyk_analytics.conf` has: + + * `mongo_url` set to the same MongoDB instance that the Pump is connected to. 
+ + **MDCB incorrectly configured** + + For scenarios where MDCB is used, ensure the `sink.conf` has: + + * `analytics.mongo_url` set to the same MongoDB instance that the Dashboard is connected to. + * `forward_analytics_to_pump` set to the correct value for your solution. `false` if MDCB is directly recording the analytics itself, `true` if it is forwarding analytics data for the Pump to process. For the forwarding scenario, set the `storage` settings to the same Redis instance that the Pump is connected to. + + **Browser caching stale data** + + Try restarting your browser, or using a private session. + + You can also try restarting the Dashboard service. + + **Troubleshooting tip** + + Check if MongoDB contains analytics data by running the following query (but update the date parameter first): + + ```{.copyWrapper} + db.getCollection('tyk_analytics_aggregates').find({timestamp: {$gte: new ISODate("2016-09-26T23:59:00Z")}}) + ``` + + The query gets all aggregated analytics data from the date provided, so if you set it to yesterday you will get all data since yesterday. The data must be in the ISO format. + +3. ##### Fatal - Dashboard and portal domains cannot be the same + + **Description** + + The Tyk Dashboard service will not start and displays a fatal error as follows: + + ``` + FATAL Dashboard and portal domains cannot be the same. + Dashboard domain: tyk-dashboard.com, Portal domain: tyk-dashboard.com + ``` + + **Cause** + + Tyk's developer portal UI needs to run on either a different subdomain or different domain name to the dashboard UI. + + Tyk's Dashboard service may be run in a multi-tenant configuration, and each tenant may have their own developer portals. + + The Dashboard service determines which portal to load based on the `Host` header in the request by the browser. If this + conflicts with the hostname of the dashboard UI the dashboard service will not know whether to serve the dashboard or + developer portal. 
+ + **Solution** + + Firstly, we will need to disable hostnames from within the Dashboard configuration file in order to get the dashboard + service started again. + + Change `host_config.enable_host_names` from `true` to `false` + ``` + "host_config": { + "enable_host_names": true, <------ CHANGE TO false + ... + ... + }, + ``` + + You should now be able to start the Dashboard service. + + Navigate to the Dashboard via its public IP address and log in. + + Change your portal domain to something different - e.g. `portal.tyk-dashboard.com` + + Edit the Dashboard configuration file to re-enable host names. + + Restart the Dashboard service. + +4. ##### Internal TIB SSO User unable to log in + + **Description** + + After creating an SSO Identity profile in the Tyk Dashboard, a user is unable to log in to the Dashboard or the Developer Portal + + **Cause** + + One potential cause is that the `DashboardCredential` setting has not been populated with the user's Tyk Dashboard API Access Credentials. + You can check this from: + + 1. From the Dashboard menu, select the Identity Management option + 2. Edit the profile you created + 3. Select the Raw Editor + 4. Check to see if the `DashboardCredential` setting is set + + DashboardCredentials + + + + **Workaround Solution** + + If, as above, the `DashboardCredential` setting is empty (`"DashboardCredential": ""`), you can manually add the user's Tyk Dashboard API Access Credentials by performing the following: + + 1. From the System Management > Users menu, select Actions > Edit from the user whose credentials you want to use + 2. Copy the **Tyk Dashboard API Access Credentials** value + + User API Access Credentials + + 3. Paste this into the Raw editor for the `DashboardCredential` setting. For example - `"DashboardCredential": "887dad0de40b4ff05b6b50739b311099"` + 4. Click **Update** + 5. 
The user should now be able to log in to the Dashboard/Portal + + + + + This issue is due to be fixed in an upcoming release + + + +5. ##### Key object validation failed, most likely malformed input error + + **Description** + + The user is getting the error `Key object validation failed, most likely malformed input` when calling the Dashboard API. + + **Cause** + + The issue is caused by an invalid character passed in the JSON body of the request. + + **Solution** + + Validate the JSON using a JSON validator. + + Further, please see [this community forum post](https://community.tyk.io/t/error-creating-new-api-through-dashboard-rest-api/1555/2) for additional guidance. + +6. ##### Port 5000 Errors in the Browser Console + + > **NOTE**: Port 5000 is no longer required from v2.9.3. + + **Description** + + You see a lot of `net::ERR_CONNECTION_REFUSED` errors in the browser console. + + **Cause** + + The Dashboard is trying to connect to `https://:5000/socket.io/?chan=ui_notifications` and you don't have port 5000 open. + + **Solution** + + Port 5000 is used for WebSocket connections for real-time Dashboard notifications. You can change the port by changing the default `notifications_listen_port` in your `tyk_analytics.conf`. Otherwise you can ignore the errors in the browser console. + + + +Port 5000 is only required if you need to enable the Tyk Gateway log viewer. + + + +7. ##### "There was a problem updating your CNAME" error in the Dashboard + + **Description** + + A user may find that they are unable to update a CNAME from within the Dashboard. The following error will appear in a pop-up: + + ``` + There was a problem updating your CNAME, please contact support + ``` + + **Cause** + + The UI for setting the domain name has a very strict validation, so it may just be rejecting this domain. 
+ + **Solution** + + The best way to set the domain is to use the Tyk Dashboard Admin API, to obtain the organization object via a GET request and then update the object using a PUT request with the relevant CNAME added to the body of the request.[[1](/api-management/dashboard-configuration#organizations-api)] Restarting the process will then set the domain. + +8. ##### runtime error invalid memory address or nil pointer dereference + + **Description** + + When attempting to POST an OAuth Client to a newly generated API, user may receive the following stack trace: + + ``` + 2016/12/08 08:06:16 http: panic serving 172.18.0.4:46304: runtime error: invalid memory address or nil pointer dereference + goroutine 364279 [running]: + net/http.(*conn).serve.func1(0xc420569500) + panic(0xb0e780, 0xc420014040) + /usr/local/go/src/runtime/panic.go:458 +0x243 + main.createOauthClient(0xf58260, 0xc4203a41a0, 0xc4206764b0) + /home/tyk/go/src/github.com/lonelycode/tyk/api.go:1526 +0x64a + main.CheckIsAPIOwner.func1(0xf58260, 0xc4203a41a0, 0xc4206764b0) + /home/tyk/go/src/github.com/lonelycode/tyk/middleware_api_security_handler.go:24 +0x2ae + net/http.HandlerFunc.ServeHTTP(0xc420533e50, 0xf58260, 0xc4203a41a0, 0xc4206764b0) + /usr/local/go/src/net/http/server.go:1726 +0x44 + github.com/gorilla/mux.(*Router).ServeHTTP(0xc42061cdc0, 0xf58260, 0xc4203a41a0, 0xc4206764b0) + /home/tyk/go/src/github.com/gorilla/mux/mux.go:98 +0x255 + net/http.(*ServeMux).ServeHTTP(0xc420667290, 0xf58260, 0xc4203a41a0, 0xc4206764b0) + /usr/local/go/src/net/http/server.go:2022 +0x7f + net/http.serverHandler.ServeHTTP(0xc42000fc80, 0xf58260, 0xc4203a41a0, 0xc4206764b0) + /usr/local/go/src/net/http/server.go:2202 +0x7d + net/http.(*conn).serve(0xc420569500, 0xf58d20, 0xc42068bdc0) + /usr/local/go/src/net/http/server.go:1579 +0x4b7 + created by net/http.(*Server).Serve + /usr/local/go/src/net/http/server.go:2293 +0x44d + ``` + + **Cause** + + The API that the OAuth Client has been POSTed to either doesn't 
exist or hasn't had a chance to propagate throughout the system. + + **Solution** + + When creating a new OAuth Client, make sure that API it is created under exists. If the API was created recently, please wait a few minutes before attempting to create an OAuth Client under it. + +9. ##### ValueError No JSON object could be decoded" when running Dashboard Bootstrap script + + **Description** + + Users receive the following error message when attempting to run the bootstrap script in their Tyk instance: + + ``` + Traceback (most recent call last): + File """", line 1, in + File ""/usr/lib64/python2.7/json/__init__.py"", line 290, in load + **kw) + File ""/usr/lib64/python2.7/json/__init__.py"", line 338, in loads + return _default_decoder.decode(s) + File ""/usr/lib64/python2.7/json/decoder.py"", line 365, in decode + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + File ""/usr/lib64/python2.7/json/decoder.py"", line 383, in raw_decode + raise ValueError(""No JSON object could be decoded"") + ValueError: No JSON object could be decoded + ORGID: + Adding new user + Traceback (most recent call last): + File """", line 1, in + File ""/usr/lib64/python2.7/json/__init__.py"", line 290, in load + **kw) + File ""/usr/lib64/python2.7/json/__init__.py"", line 338, in loads + return _default_decoder.decode(s) + File ""/usr/lib64/python2.7/json/decoder.py"", line 365, in decode + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + File ""/usr/lib64/python2.7/json/decoder.py"", line 383, in raw_decode + raise ValueError(""No JSON object could be decoded"") + ValueError: No JSON object could be decoded + USER AUTH: + Traceback (most recent call last): + File """", line 1, in + File ""/usr/lib64/python2.7/json/__init__.py"", line 290, in load + **kw) + File ""/usr/lib64/python2.7/json/__init__.py"", line 338, in loads + return _default_decoder.decode(s) + File ""/usr/lib64/python2.7/json/decoder.py"", line 365, in decode + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + 
File ""/usr/lib64/python2.7/json/decoder.py"", line 383, in raw_decode + raise ValueError(""No JSON object could be decoded"") + ValueError: No JSON object could be decoded + NEW ID: + Setting password + DONE" + ``` + + **Cause** + + The bootstrap script requires a valid hostname and port number to generate a new login user. + + **Solution** + + Make sure that the correct hostname and port number used to run the bootstrap.sh script. An example command would be: `./bootstrap.sh new-tyk-instance.com:3000` + +10. ##### Dashboard bootstrap error + + Make sure you: + + Target the correct domain with your `bootstrap.sh` as it is very specific once you set up a Dashboard service with hostname set + + ```{.copyWrapper} + ./bootstrap.sh my-tyk-instance.com + + ``` + + Have checked the firewall rules in your instance and VPC to allow + port 3000 access. + +11. ##### How to find the policy ID for a created policy + + Open the Active Policies page in the Dashboard (System Management > Policies) and click **Edit** next to the name of the policy you've created. The policy ID should appear in the URL of the edit page that opens up. + +12. ##### How to Connect to DocumentDB with X.509 client cert + + As AWS DocumentDB runs with TLS enabled, we require a way to run it without disabling the TLS verification. + DocumentDB uses self-signed certs for verification, and provides a bundle with root certificates for this purpose, so we need a way to load this bundle. + + Additionally DocumentDB can't be exposed to the local machine outside of the Amazon Virtual Private Cloud (VPC), which means that even if verification is turned on, it will always fail since if we use a SSH tunnel or a similar method, the domain will differ from the original. Also, it can have [Mutual TLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls) enabled. 
+ + So, in order to support it, we provide the following variables for both our [Tyk Analytics Dashboard](/tyk-dashboard/configuration) and [Tyk Pump](/api-management/tyk-pump#tyk-pump-configuration): + + * `mongo_ssl_ca_file` - path to the PEM file with trusted root certificates + * `mongo_ssl_pem_keyfile` - path to the PEM file which contains both client certificate and private key. This is required for Mutual TLS. + * `mongo_ssl_allow_invalid_hostnames` - ignore hostname check when it differs from the original (for example with SSH tunneling). The rest of the TLS verification will still be performed. + + + A working DocumentDB configuration looks like this (assuming that there is SSH tunnel, proxying to 27018 port). + + ```{.json} + "mongo_url": "mongodb://testest:testtest@127.0.0.1:27018/tyk_analytics?connect=direct", + "mongo_use_ssl": true, + "mongo_ssl_insecure_skip_verify": false, + "mongo_ssl_ca_file": "/rds-combined-ca-bundle.pem", + "mongo_ssl_allow_invalid_hostnames": true, + ``` + + **Capped Collections** + + If you are using DocumentDB, [capped collections](/api-management/tyk-pump#tyk-pump-capping-analytics-data-storage) are not supported. See [here](https://docs.aws.amazon.com/documentdb/latest/developerguide/mongo-apis.html) for more details. + +13. ##### How to disable an API + + You will need to GET the API from the Dashboard, then set `active` property to `false`, then PUT it back. + See [Dashboard API - API Definitions](/api-management/dashboard-configuration#manage-apis-api-definition) for more details on how to GET and PUT an API definition. + +14. ##### How to Setup CORS + + **Upstream service supports CORS** + If your upstream service supports CORS already then Tyk should ignore **OPTIONS** methods as these are pre-flights sent by the browser. In order to do that you should select **Options passthrough**, and **NOT CHECK** CORS in Tyk. 
+ + - If you do not allow **OPTIONS** to pass through, it will cause Tyk to dump the options request upstream and reply with the service's response so you'll get an error similar to `no 'access-control-allow-origin' header is present on the requested resource`. + + - If you check **CORS** as well you'll get an error similar to this: + ``` + Failed to load https://ORG_NAME.cloud.tyk.io/YOUR_API: The 'Access-Control-Allow-Origin' header + contains multiple values 'http://UPSTREAM', but only one is allowed. Origin 'http://UPSTREAM' + is therefore not allowed access. Have the server send the header with a valid value, or, if an + opaque response serves your needs, set the request's mode to 'no-cors' to fetch the resource with + CORS disabled + ``` + This is because you have enabled CORS on the API Definition and the upstream **also** supports CORS and so both add the header. + + + **Upstream does not handle CORS** + If your upstream does not handle CORS, you should let Tyk manage all CORS related headers and responses. In order to do that you should **enable CORS** in Tyk and **NOT ENABLE** Options pass through. + + To learn more, look for `CORS.options_passthrough` [here](/api-management/gateway-config-tyk-classic#cross-origin-resource-sharing-cors). + + + **CORS middleware is allowing headers which I did not allow** + This may be the case when you enable CORS but don't provide any headers explicitly (basically providing an empty array). In this case the CORS middleware will use some sensible defaults. + To allow all headers, you will need to provide `*` (although this is not recommended). + + The same can happen with Allowed Origins and Allowed Methods. Read more about it [here](/api-management/gateway-config-tyk-classic#cross-origin-resource-sharing-cors). + + **CORS middleware is blocking my authenticated request** + Please make sure that you did allow the authorization header name (e.g. `Authorization`) or else the request will be blocked by the CORS middleware. 
If you're having trouble on the developer portal with authenticated requests make sure to also allow the `Content-Type` header. + +15. ##### No Key information on the Dashboard + + Information relating to a given key doesn't automatically appear in the Dashboard for users who have switched from a Self-Managed installation to a Multi-Cloud setup. + + The stats for a key will never update in the Cloud for a Multi-Cloud installation. The Dashboard in this mode only sets the initial β€œmaster” values for a key and those keys are then propagated across the Multi-Cloud instances that are using them (for example, you may have multiple zones with independent Redis DBs) at which point they diverge from each other. + + To see the up to date stats for a token, the key must be queried via the Gateway API. + +16. ##### How to rename or move existing headers in a request + + To rename a header, or to move a value from one header to another (for example, moving an authentication token to a secondary place, or copying a value that gets replaced upstream) is easy with [context variables](/api-management/traffic-transformation/request-context-variables). Here is an example where we move the value of `X-Custom-Header` to a new header called `X-New-Custom-Header` in all requests. + + We do this by setting the following in our API Definition Version section: + ```{.copyWrapper} + "global_headers": { + "X-New-Custom-Header": "$tyk_context.headers_X_Custom_Header" + }, + "global_headers_remove": ["X-Custom-Header"], + ``` + + You can test the header with the following command. This assumes your API Authentication mode is set to open(keyless): + + ```{.copyWrapper} + curl -X GET \ + https://DOMAIN/LISTEN_PATH/get \ + -H 'content-type: application/json' \ + -H 'x-custom-header: Foo' \ + ``` + + + You can also do this via the Dashboard from the Endpoint Designer tab within the API Designer: + + rename header + +17. 
##### How to run the Dashboard and portal on different ports + + Unfortunately it's not possible to run the Dashboard and Portal on different ports, they must use the same port. + +18. ##### How to run two Gateways with docker-compose + + Managing a second Tyk Gateway with our [Tyk Pro Docker Demo](/deployment-and-operations/tyk-self-managed/tyk-demos-and-pocs/overview#docker-compose-setup) is a case of mounting the `tyk.conf` file into a new volume and declaring a new Gateway service but exposed on a different port. + You will need to make some minor modifications to `docker-compose.yml` and start your services as usual with `docker-compose up`. + + + + + This will only work with an appropriate license. The free license is for development purposes and would allow running Tyk's licensed platform with only one Gateway. If you want to test Tyk with more please contact us by email [info@tyk.io](mailto:info@tyk.io) and we will be happy to discuss your case and PoC requirements as well as providing a short period license. + + + + + **Add the following to `docker-compose.yml` (after the `tyk-gateway` definition)** + + ``` + tyk-gateway2: + image: tykio/tyk-gateway:latest + ports: + - "8081:8080" + networks: + - tyk + depends_on: + - tyk-redis + volumes: + - ./confs/tyk.conf:/opt/tyk-gateway/tyk.conf + ``` + +19. ##### "Payload signature is invalid!" error + + **Description** + + Users receive the error "Payload signature is invalid!" in their logs. + + **Cause** + + Users may not have enabled payload signatures in their settings after an upgrade. + + **Solution** + + See [System Payloads](/api-management/security-best-practices#sign-payloads) for more details. + +## Pump + +1. ##### Capturing detailed logs + + If you've seen the documentation for Tyk Dashboard's [log browser](/api-management/dashboard-configuration#activity-logs), then you'll also be wondering how to set up your Tyk configuration to enable detailed request logging. 
+ + **What is detailed request logging?** + + When [detailed request logging](/api-management/logs-metrics#capturing-detailed-logs) is enabled, Tyk will record the request and response in wire-format in the analytics database. This can be very useful when trying to debug API requests to see what went wrong for a user or client. + + This mode is configured in the gateway and can be enabled at the [system](/api-management/logs-metrics#configure-at-gateway-level), [API](/api-management/logs-metrics#configure-at-api-level) or [access key](/api-management/logs-metrics#configure-at-key-level) level. + + You will also need your Tyk Pump configured to move data into your preferred data store. + + **Disabling detailed recording for a particular pump** + + In some cases, you don't want to send the detailed request and response to a particular data store. + In order to do that, you can set `omit_detailed_recording` in your Tyk Pump configuration file to `true`. This will disable the detailed logging for a specific pump. + + For example, if we have ElasticSearch, Kafka and CSV stores, and you want to save the detailed recording in all of them except Kafka you can use the following configuration: + + Enable detailed analytics on the Gateway `tyk.conf` using: + ```{.copyWrapper} + "enable_analytics" : true, + "analytics_config": { + "enable_detailed_recording": true + } + ``` + - Configure each pump on `pump.conf`. + - Add the `omit_detailed_recording` variable to the Kafka pump: + ```{.copyWrapper} + "pumps": { + "kafka": { + "type": "kafka", + "omit_detailed_recording": true, + "meta": { + ... + } + }, + ... + }, + ``` + +2. ##### Connection dropped, connecting... 
+ + **Description** + + Users may notice the following message in their logs for the Tyk Pump: + + ``` + [Jun 3 22:48:02] INFO elasticsearch-pump: Elasticsearch Index: tyk_analytics + [Jun 3 22:48:02] INFO main: Init Pump: Elasticsearch Pump + [Jun 3 22:48:02] INFO main: Starting purge loop @10(s) + [Jun 3 22:48:12] WARN redis: Connection dropped, connecting.. + [Jun 3 22:48:23] INFO elasticsearch-pump: Writing 1386 records + [Jun 3 22:50:11] INFO elasticsearch-pump: Writing 13956 records + ``` + + **Cause** + + This is normal behavior for the Tyk Pump. + + **Solution** + + N/A + +3. ##### Data Seen in Log Browser but No Reports + + **Description** + + You can see data in the log browser but the rest of the reports display nothing. + + **Solution** + + If your Pump is configured to use `mongo_selective_pump` (e.g. store data in a collection per organization), ensure that the [Dashboard configuration setting](/tyk-dashboard/configuration) `use_sharded_analytics` is set to `true`. + + The same applies in the reverse direction. If you are using `mongo-pump-aggregate` in your [pump configuration](/api-management/tyk-pump#tyk-pump-configuration), set `use_sharded_analytics` to false. + + This is because you've enabled `use_sharded_analytics` as per above and you're using the `mongo-pump-aggregate`, but you now also have to add a `mongo-pump-selective` in order to save individual requests to Mongo, which the Dashboard can read into the Log Browser. + +4. ##### No Elasticsearch node available + + **Description** + + Tyk Pump is configured to use Elasticsearch, but it does not work and shows `no Elasticsearch node available` message in log. + + ``` + tyk-pump[68354]: time="Aug 30 15:19:36" level=error msg="Elasticsearch connection failed: no Elasticsearch node available" + ``` + + **Cause** + + The `elasticsearch_url` configuration property in the `pump.conf` is missing the HTTP prefix e.g. 
+ + ``` + "elasticsearch_url": "127.0.0.1:9200" + ``` + + **Solution** + + Ensure the HTTP prefix is present in the `elasticsearch_url` configuration property e.g. + + ``` + "elasticsearch_url": "http://127.0.0.1:9200" + ``` + +5. ##### Tyk Pump Panic "stack exceeds 1000000000-byte limit" + + **Description** + + Users receive the aforementioned error message in a stack trace in the Pump. + + **Cause** + + Users receive the aforementioned error message in a stack trace in the Pump. + + **Solution** + + Users are advised to upgrade to the latest version of Tyk. They must also ensure that their Pump is configured with a `purge_delay` and an `optimisation_max_active` value that's greater than 0. Packages are available to download from [Packagecloud.io](https://packagecloud.io/tyk) and further details on how to upgrade can be found [here](/developer-support/upgrading) + +6. ##### Pump overloaded + + **Description** + + The Tyk Pump cannot deal with the amount of analytics data generated by the Gateway. This means the Pump is unable to process all the analytics data within the purge period. + + **Cause** + + If there is excessive analytics data, the pump may become overwhelmed and not able to move the data from Redis to the target data store. + + **Solution** + + There are many ways to approach solving this problem. + + **Scale the Pump** + + Scale the Pump by either increasing the CPU capacity of the Pump host or by adding more Pump instances. + + By adding more instances you are spreading the load of processing analytics records across multiple hosts, which will increase processing capacity. + + **Disable detailed analytics recording** + + Set `analytics_config.enable_detailed_recording` to `false` in the Gateway configuration file `tyk.conf`. Detailed analytics records contain much more data and are more expensive to process, by disabling detailed analytics recording the Pump will be able to process higher volumes of data. 
+ + **Reduce the Pump purge delay** + + Set `purge_delay` to a low value e.g. `1` in the Pump configuration file `pump.conf`. This value is the number of seconds the Pump waits between checking for analytics data. Setting it to a low value will prevent the analytics data set from growing too large as the pump will purge the records more frequently. + + **Reduce analytics record expiry time** + + Set `analytics_config.storage_expiration_time` to a low value e.g. `5` in the Gateway configuration file `tyk.conf`. This value is the number of seconds beyond which analytics records will be deleted from the database. The value must be higher than the `purge_delay` set for the Pump. This will allow for analytics records to be discarded in the scenario that the system is becoming overwhelmed. Note that this results in analytics record loss, but will help prevent degraded system performance. + +## Streams + +1. ##### Failure to connect to the event broker + + If Tyk Gateway is unable to establish a connection to the configured event broker (e.g., Kafka, MQTT), check the following: + - Verify that the broker connection details in the Tyk Dashboard are correct, including the hostname, port, and any required credentials. + - Ensure that the event broker is running and accessible from the Tyk Gateway instance. + - Check the network connectivity between the Tyk Gateway and the event broker. Use tools like telnet or nc to validate the connection. + +2. ##### Messages are not being published or consumed + + If messages are not being successfully published to or consumed from the event broker, consider the following: + - Verify that the topic or queue names are correctly configured in the Tyk Dashboard and match the expected values in the event broker. + - Check the Tyk Gateway logs for any error messages related to message publishing or consumption. Adjust the log level to "debug" for more detailed information. 
+ - Validate that the message format and schema match the expectations of the consumer or producer. Inspect the message payloads and ensure compatibility. + +3. ##### Async API performance is poor or connections are being throttled + + If you observe performance issues or connection throttling with async APIs, consider the following: + - Review the configured rate limits and quotas for the async API. Adjust the limits if necessary to accommodate the expected traffic. + - Monitor the resource utilization of the Tyk Gateway instances and the event broker. Ensure that there is sufficient capacity to handle the load. + - Consider scaling the Tyk Gateway horizontally by adding more instances to distribute the traffic load. + +4. ##### What are best practices of using Tyk Streams + + - Use meaningful and descriptive names for your async APIs, topics, and subscriptions to improve readability and maintainability. + - Implement proper security measures, such as authentication and authorization, to protect your async APIs and restrict access to authorized clients only. + - Set appropriate rate limits and quotas to prevent abuse and ensure fair usage of the async API resources. + - Monitor the performance and health of your async APIs using Tyk's built-in analytics and monitoring capabilities. Set up alerts and notifications for critical events. + - Version your async APIs to manage compatibility and enable seamless updates without disrupting existing clients. + - Provide comprehensive documentation for your async APIs, including details on message formats, schemas and example payloads, to assist developers in integrating with your APIs effectively. + + +## Debugging Series + +### MongoDB + +Tyk uses Mongo as a database to store much of its analytical data. This means if you have a dashboard instance that is down, there’s a high chance that this is because of either Mongo being down or an issue with your dashboard connecting to Mongo. 
+ +Here, we'll outline the following: + + - How to isolate Mongo as the root of the error + - The steps to take to help stop your system from going down. + +1. ##### Isolating Mongo as the fault + + Here are a few ways to identify Mongo as the source of the problem: + + 1. Analytics is not showing up on the dashboard + 2. When hitting the `/hello` endpoint, the dashboard is down + 3. The Mongo database size is hitting hardware resource limits. + +2. ##### Mongo status + + Similarly to Tyk, Mongo has a health check that we can run to get the status of our Mongo instance. This should be a starting point for debugging Mongo (depending on which system): + + - `Sudo systemctl status mongod` or `sudo service mongodb status` + - Logs under `/var/log/mongo/mongo.log` should also outline any outage + +3. ##### Mongo version + + Does Tyk support the version of Mongo that you’re using? Read more about that [here](/planning-for-production/database-settings#mongodb). + +4. ##### Capped collections + + Suppose a Mongo instance runs over a long period in addition to a lot of traffic in a Tyk system. In that case, the chances of the collections growing out of control are very real - especially the `tyk_analytics` collections. + + In some cases, `enable_detailed_logging: true` adds fuel to the fire, as this parameter should only be set temporarily during debugging. This configuration exists on the gateway and the API levels, so ensure this is off after debugging. + + We advise everyone to cap every collection in Mongo, as this prevents collections from growing out of control and bringing your dashboard down by hitting resource limits. + + You can determine each collection's cap size by visiting our [MongoDB sizing calculator](/planning-for-production/database-settings#mongodb-sizing-guidelines). + + Here’s more information on how and why you want to [cap your collections](https://www.mongodb.com/docs/manual/core/capped-collections/). + +5. 
##### Size caps versus TTL-capped collections + + Are you trying to decide between capping your collections or by size? It depends on a couple of factors. Ultimately, both settings will get rid of older data, so it’s based on how far back you need to view it. + + Assuming you only need data for a few days, then using a TTL will be the best route, as it will only allow your collections to grow that wild over a short period. + + Alternatively, if you care about how big the collections grow and want to see longer-lived data, then capping by size is your best direction. This will limit the collection to developing within a controlled resource limit. And in the context of aggregate analytics, this collection will hold data for long periods. + + One thing to note here is that if you head down the TTL route, and if your environment has A LOT of traffic, then your collections can grow wild and fast, while a size-capped collection will always stay within a known size limit. + +6. ##### Handling overgrown, uncapped collections + + There are three ways to do this: + + 1. The first method is to delete (drop) the collection and create a new collection with a cap (commands below). + + ```bash + # This will drop a collection. When using this, cached data will not be deleted. + db..drop() + ``` + + ```bash + # Can use the below call. Drops the collection and removes any cache data + db..remove() + ``` + + 2. The second method is to rename the collection to a random name and then create a new collection with a cap. Then restart Mongo with a larger size (we do this because the overgrown collections still exist). This is to confirm that the collection size grew too large and dropped the Mongo connection. The renaming also helps conserve the existing data if you still need it (but it will be useless in the background unless you attempt the third method). + + 3. The third method is to delete (deleteMany() call below) the old data to trim down their collection size. 
Then, you can restart your instance to see if the connection goes up again.
+
+ ```bash
+ # Will delete data off a collection that does NOT have a cap. Otherwise, it will throw an error.
+ db.<collection_name>.deleteMany()
+ ```
+
+7. ##### Secure Mongo connection
+
+ You will use a secured connection to your Mongo instance in most production cases. Here are a few things to consider:
+
+ - Verify there isn’t a network issue that stops your dashboard from connecting to Mongo. You can do this by hitting the dashboard server from your Mongo server (or vice versa)
+
+ - Validate certificate and `.pem` files
+
+ - Connect (command below) to Mongo with certificates
+
+ ```bash
+ # Replace the below files with the correct parameters (proper file paths and host).
+ mongo --ssl --sslCAFile /opt/mongodb/ssl/ca.pem --sslPEMKeyFile /opt/mongodb/ssl/mongodb.pem --host 127.0.0.1
+ ```
+ - Verify Pump has the correct parameters to include your certificates
+
+ - Verify your dashboard has the correct parameters relative to your environment:
+
+ ```json
+ "mongo_url": "mongodb://localhost/tyk_analytics",
+ "mongo_use_ssl": true,
+ "mongo_ssl_ca_file": "/opt/mongodb/ssl/ca.pem",
+ "mongo_ssl_pem_keyfile": "/opt/mongodb/ssl/mongodb.pem",
+ "mongo_ssl_insecure_skip_verify": true
+ ```
+
+8. ##### How to Cap analytics data storage
+
+ What methods are available to enable me to manage my MongoDB analytics storage?
+
+ [Time Based Caps](/api-management/tyk-pump#time-based-cap-in-single-tenant-environments)
+
+ [Size Based Caps](/api-management/tyk-pump#size-based-cap)
+
+
+
+
+ Time based caps (TTL indexes) are incompatible with already configured size based caps.
+
+
+
+
+
+If you are using DocumentDB, capped collections are not supported. See [here](https://docs.aws.amazon.com/documentdb/latest/developerguide/mongo-apis.html) for more details.
+
+
+
+9. 
##### MongoDB X.509 Client Authentication
+
+ You can use the *MongoDB X509 Certificate* flow to authenticate the *Tyk Dashboard*, *Tyk Pump*, and *Tyk MDCB* with your *MongoDB* install. This is slightly different from [AWS DocumentDB setup instructions](/api-management/troubleshooting-debugging#how-to-connect-to-documentdb-with-x509-client-cert).
+
+ Before we get into the configuration, we need to understand the two key components: connection strings and certificates.
+
+ 1. **Connection Strings**
+
+ 1) You must specify a username (and password if needed) in the connection string. [Why do you need a username at all?](https://docs.mongodb.com/manual/tutorial/configure-x509-client-authentication/)
+
+ 2) We must specify the following parameters: `?authSource=$external&authMechanism=MONGODB-X509"`
+
+ **An example of a connection string would be:**
+
+ ```bash
+ "mongodb://CN=tyk-mongo-client,OU=TykTest@:/?authSource=$external&authMechanism=MONGODB-X509"
+ ```
+
+ ##### Passwords
+ If you have to include a password, you can do it after the username in basic auth format:
+
+ ```bash
+ "mongodb://CN=tyk-mongo-client,OU=TykTest,O=TykTest:mypassword@:/?authSource=$external&authMechanism=MONGODB-X509"
+ ```
+
+ ##### URL Encoding Protected Characters
+ Note that you must URL encode the `:` character into `%3A`. So replace any `:` in the username field with the URL encoded version.
+
+ 2. **Certificates**
+
+ You'll need to provide two certificates to complete the X509 Client Authentication:
+
+ **CA Cert** containing just the public key of the Certificate Authority (CA).
+
+ **Client Cert** containing both the public and private keys of the client.
+
+ ##### Configuration
+
+ Here's what it looks like all put together:
+
+ 1. **Tyk Dashboard**
+
+ Your `tyk_analytics.conf` should include these fields at the root level:
+
+ ```json
+ {
+ ... 
+ "mongo_url": "mongodb://@:/?authSource=$external&authMechanism=MONGODB-X509", + "mongo_use_ssl": true, + "mongo_ssl_ca_file": "ca.pem", + "mongo_ssl_pem_keyfile": "client.pem" + } + ``` + + | Config File | Environment Variable | Type | Examples + | --- | -- | ---- | ---- | + | "mongo_url" | TYK_DB_MONGOURL | string | "mongodb://{username}@{host}:{port}/{db}?authSource=$external&authMechanism=MONGODB-X509" | + | "mongo_use_ssl" | TYK_DB_MONGOUSESSL | bool | true, false | + | "mongo_ssl_ca_file" | TYK_DB_MONGOSSLCAFILE | string | "certificates/ca.pem" | + | "mongo_ssl_pem_keyfile" | TYK_DB_MONGOSSLPEMKEYFILE | string | "certificates/key.pem" | + | "mongo_ssl_insecure_skip_verify" | TYK_DB_MONGOSSLINSECURESKIPVERIFY | bool | true, false | + | "mongo_ssl_allow_invalid_hostnames" | TYK_DB_MONGOSSLALLOWINVALIDHOSTNAMES | bool | true, false | + | "mongo_session_consistency" | TYK_DB_MONGOSESSIONCONSISTENCY | string | "strong", "eventual", or "monotonic". default is "strong" | + | "mongo_batch_size" | TYK_DB_MONGOBATCHSIZE | int | Default "2000", min "100" | + + 2. **Tyk Pump** + + Tyk offers three different MongoDB pumps (`mongo`, `mongo_aggregate`, and `mongo_selective`), each of which must be separately configured for X509 certificate authentication. + + The following fields must be set under the `meta` section of each pump (or set as environment variable): + + ```yaml + { + ... 
+ "pumps": { + "mongo": { + "type": "mongo", + "meta": { + "collection_name": "tyk_analytics", + "mongo_url": "mongodb://CN=tyk-mongo-client,OU=TykTest@:/?authSource=$external&authMechanism=MONGODB-X509", + "mongo_use_ssl": true, + "mongo_ssl_ca_file": "ca.pem", + "mongo_ssl_pem_keyfile": "client.pem" + } + } + } + } + ``` + + In addition to the other configs, these are the ones related to MongoDB: + + | Config File | Type | Examples + | -- | -- | -- + "mongo_url" | string | "mongodb://{username}@{host}:{port}/{db}?authSource=$external&authMechanism=MONGODB-X509" | + "mongo_use_ssl" | bool | true, false | + "mongo_ssl_ca_file" | string | "certificates/ca.pem" | + β€œmongo_ssl_pem_keyfile" | string | "certificates/key.pem" | + "mongo_ssl_insecure_skip_verify" | bool | true, false | + "mongo_ssl_allow_invalid_hostnames" | bool | true, false | + + 3. **Tyk MDCB** + + As of Tyk MDCB v1.8.0, you have been able to secure Tyk MDCB with MongoDB using X509 Certificate Authentication flow. + + The config settings are exactly the same as the Tyk Dashboard steps, just nested one level deeper: + + **Example Config:** + ```json + { + ... 
+ "analytics": { + "mongo_url": "mongodb://CN=tyk-mongo-client,OU=TykTest@:/?authSource=$external&authMechanism=MONGODB-X509", + "mongo_use_ssl": true, + "mongo_ssl_ca_file": "ca.pem", + "mongo_ssl_pem_keyfile": "client.pem" + } + } + ``` + | Config File | Environment Variable | Type | Examples + | --- | -- | ---- | ---- | + "analytics.mongo_url" | TYK_MDCB_ANALYTICSCONFIG_MONGOURL | string | "mongodb://{username}@{host}:{port}/{db}?authSource=$external&authMechanism=MONGODB-X509" + "analytics.mongo_use_ssl" | TYK_MDCB_ANALYTICSCONFIG_MONGOUSESSL | bool | true, false | + "analytics.mongo_ssl_ca_file" | TYK_MDCB_ANALYTICSCONFIG_MONGOSSLCAFILE | string | "certificates/ca.pem" | + "analytics.mongo_ssl_pem_keyfile" | TYK_MDCB_ANALYTICSCONFIG_MONGOSSLPEMKEYFILE | string | "certificates/key.pem" | + "analytics.mongo_ssl_insecure_skip_verify" | TYK_MDCB_ANALYTICSCONFIG_MONGOSSLINSECURESKIPVERIFY | bool | true, false | + "analytics.mongo_ssl_allow_invalid_hostnames" | TYK_MDCB_ANALYTICSCONFIG_MONGOSSLALLOWINVALIDHOSTNAMES | bool | true, false | + "analytics.mongo_session_consistency" | TYK_MDCB_ANALYTICSCONFIG_MONGOSESSIONCONSISTENCY | string | "strong", "eventual", or "monotonic". default is "strong" | + "analytics.mongo_batch_size" | TYK_MDCB_ANALYTICSCONFIG_MONGOBATCHSIZE | int | Default "2000", min "100" | + +### Tyk Self-Managed + +This guide should help a user of Tyk Self-Managed in debugging common issues. A helpful way to go about this is by: + +1. Isolating your components to see where the error is coming from +2. Enabling debug logs to ensure you get all the information you need + +1. ##### Gateway `/hello` endpoint + + Querying the gateway's `/hello` health endpoint is the quickest way to determine the status of your Tyk instance. You can find more information in our docs about the [Gateway Liveness health check](/planning-for-production/ensure-high-availability/health-check). + + This endpoint is important as it allows the user to isolate the problem's origin. 
At a glance, the `/hello` endpoint reports the Gateways connectivity to Redis, and the control plane components eg. Tyk Dashboard, Tyk Multi-Data Center Bridge (MDCB), and Tyk Cloud. + + ```json + { + "status": "pass", + "version": "v5.0", + "description": "Tyk GW", + "details":{ + "dashboard":{ + "status": "pass", + "componentType": "system", + "time": "2023-01-13T14:45:00Z" + }, + "redis":{ + "status": "pass", + "componentType": "datastore", + "time": "2023-01-13T14:45:00Z" + } + }, + "rpc": { + "status": "pass", + "componentType": "system", + "time": "2023-01-13T14:45:00Z" + } + } + ``` + + If the Dashboard or RPC connectivity fails (control plane components), the Gateway will still function based on the last received configurations from those components. However, if Redis fails, Gateway will go down since it is a hard dependency. + +#### Debug Logs + +Setting the log level to debug will allow for more descriptive logs that will give a better context around any issue you might be facing. For example, here are the different outputs you receive when calling an Open Keyless API with `info` and `debug` log-level modes. + +Here is the output when using `info` as the log level: + +```bash +tyk-pump | time="Jan 24 14:39:19" level=info msg="Purged 1 records..." prefix=mongo-pump +tyk-pump | time="Jan 24 14:39:19" level=info msg="Purged 1 records..." prefix=mongo-pump-selective +tyk-mongo | 2023-01-24T14:39:19.228+0000 I NETWORK [listener] connection accepted from 172.20.0.2:51028 #19 (19 connections now open) +tyk-pump | time="Jan 24 14:39:19" level=info msg="Completed upserting" collection="tyk_analytics_aggregates" prefix=mongo-pump-aggregate +tyk-pump | time="Jan 24 14:39:19" level=info msg="Purged 1 records..." 
prefix=mongo-pump-aggregate +``` + +Here is a more detailed output of the same call when using `debug` as the log level: + +```bash +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Started proxy" +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Stripping proxy listen path: /api1/" +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Upstream path is: /get" +tyk-gateway | time="Jan 24 14:32:19" level=debug msg=Started api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 mw=ReverseProxy org_id=63ca963f6888c7000191890e ts=1674570739659369736 +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Upstream request URL: /get" api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 mw=ReverseProxy org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Outbound request URL: http://httpbin.org/get" api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 mw=ReverseProxy org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Creating new transport" api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 mw=ReverseProxy org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Out request url: http://httpbin.org/get" api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 mw=ReverseProxy org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Request is not cacheable" mw=ResponseCacheMiddleware +tyk-gateway | time="Jan 24 14:32:19" level=debug msg=Finished api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 mw=ReverseProxy ns=316559477 org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Upstream request took (ms): 316.639871" +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Checking: 63ca963f6888c7000191890e" api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="no cached entry found, returning 7 days" 
api_id=63666619de884d0563ee3ccc67d57929 api_name=api1 org_id=63ca963f6888c7000191890e +tyk-gateway | time="Jan 24 14:32:19" level=debug msg="Done proxy" +tyk-pump | time="Jan 24 14:32:20" level=info msg="Purged 0 records..." prefix=mongo-pump-aggregate +tyk-pump | time="Jan 24 14:32:20" level=info msg="Purged 1 records..." prefix=mongo-pump-selective +tyk-pump | time="Jan 24 14:32:20" level=info msg="Completed purging the records" collection="tyk_analytics" number of records=1 prefix=mongo-pump +tyk-pump | time="Jan 24 14:32:20" level=info msg="Purged 1 records..." prefix=mongo-pump +tyk-mongo | 2023-01-24T14:32:20.398+0000 I NETWORK [listener] connection accepted from 172.20.0.3:54712 #19 (19 connections now open) +tyk-pump | time="Jan 24 14:32:20" level=info msg="Completed upserting" collection="tyk_analytics_aggregates" prefix=mongo-pump-aggregate +tyk-pump | time="Jan 24 14:32:20" level=info msg="Purged 1 records..." prefix=mongo-pump-aggregate + +``` + +As shown above, the `debug` log level mode provides more information which will help during your debugging stage, i.e when the API call was started, when it was finished, how long it took for the call to finish, the endpoint that was called, the upstream that was called, the organization that the API belongs to, and more. + +1. ##### Gateway Debug Settings + + If you’re using a `*.conf` for your configuration parameters: + + ```json + "log_level": "debug" + ``` + + If you’re using environment variables for your configuration: + + ```bash + TYK_GW_LOGLEVEL=debug + ``` + + If you're using Tyk Helm Charts. Add the following items to your `values.yaml`: + + ```yaml + extraEnvs: + - name: TYK_LOGLEVEL + value: debug + ``` + +2. ##### Dashboard Debug Settings + + If you’re using a `*.conf` for your configuration parameters: + + ```json + "log_level": "debug" + ``` + + If you’re using environment variables for your configuration: + + ``` + TYK_DB_LOGLEVEL=debug + ``` + + If you're using Tyk Helm Charts. 
Add the following items to your `values.yaml`: + + ```yaml + extraEnvs: + - name: TYK_LOGLEVEL + value: debug + ``` + + You can find the full [log levels](/api-management/logs-metrics#system-logs) in our documentation. + +#### Versions + +You can access all Tyk release information on the [release notes](/developer-support/release-notes/overview) overview page. + +We recommend always using the [Long-Term Support (LTS) release](/developer-support/release-types/long-term-support) for stability and long term support. + +##### Non-LTS versions +Tyk is backwards compatible, upgrading to newer versions won't turn on new features or change the behavior of your existing environment. + +For the best experience when experimenting with Tyk and exploring its latest capabilities, you can use our latest version. You can access all Tyk releases on the [release notes summary](/developer-support/release-notes/overview) page. + +#### Dashboard + +The Dashboard front-end (GUI included) uses [Tyk Dashboard API](/tyk-dashboard-api) to retrieve data to display or update. This means you can use the [developer tools on your browser](https://developer.mozilla.org/en-US/docs/Learn/Common_questions/Tools_and_setup/What_are_browser_developer_tools) to access the API and its information. Looking into the API details, the URL, the headers, the payload and the response can help you investigate the source of the issue and replicate it with API calls using an HTTP client such as [cURL](https://curl.se/) or [Postman](https://www.postman.com/). +As a next step to this investigation, if that specific endpoint exists also in [Tyk Gateway API](/tyk-gateway-api), you can compare the responses from both gateway and dashboard requests. + +##### Isolating + +As mentioned above, errors can happen in any of the components of your Tyk deployment, and as such, one of the critical things you'll pick up during your debugging phase is isolating these environments. 
+
+##### Dashboard Level
+
+When debugging an issue, in order to isolate the gateway from the Dashboard, try to call the same API endpoint on both Tyk Dashboard and Tyk Gateway.
+If it works with the gateway API only, then the issue is likely to be in the Dashboard. It could be that you need to set in the Dashboard some [configuration parameters](/tyk-dashboard/configuration) (using the config file or via environment variables).
+
+##### Gateway or API level
+
+Are you making calls against your gateway or API, and it's not working? Try isolating the gateway from everything else. Often you'll see that the gateway or API aren't at fault and that it's something else; it can be the load balancer you have in your environment blocking the call from ever reaching it.
+
+In the case of the API erroring out, you can also isolate it by:
+
+- Creating a generic Httpbin API and calling it
+ - If this works, then the API configuration or the backend is at fault
+- Changing the target URL of the API
+ - The upstream API can be at fault
+- Assuming your API has a plugin, take away the plugin and test the API
+ - The error most likely exists in the plugin
+- If the error exists in your plugin, try taking out certain parts of the code and testing it with minimal logic
+ - This means that part of your code with integrated logic is incorrect
+- Is the target URL the same in another one of your APIs?
+ - The gateway sees the API as duplicated and changes the new target URL causing the gateway to error.
+
+You will eventually hit the point of error by further isolating parts of your API. 
+ diff --git a/api-management/tyk-pump.mdx b/api-management/tyk-pump.mdx new file mode 100644 index 000000000..405509b56 --- /dev/null +++ b/api-management/tyk-pump.mdx @@ -0,0 +1,1835 @@ +--- +title: "Tyk Pump - Export Metrics to Persistent Datastore" +description: "How to configure Tyk Pump" +keywords: "Pump, CSV, Datadog, Elasticsearch, Logzio, Moesif, Splunk, Prometheus, Analytics Storage, Monitoring, Observability" +sidebarTitle: "Metrics Exporter" +--- + +## Introduction + +Traffic analytics are captured by the Gateway nodes and then temporarily stored in Redis. The Tyk Pump is responsible for moving those analytics into a persistent data store, such as MongoDB, where the traffic can be analyzed. + +## What is the Tyk Pump? + +The Tyk Pump is our [open source](https://github.com/TykTechnologies/tyk-pump) analytics purger that moves the data generated by your Tyk nodes to any back-end. It is primarily used to display your analytics data in the Tyk Dashboard. + + + +The Tyk Pump is not currently configurable in our Tyk Cloud solution. + + + +### Tyk Pump Data Flow + +Here's the architecture depending on your deployment model: + + + + +Tyk Enterprise Pump Architecture + + + + +Tyk Open Source Pump Architecture + + + + +Tyk-Pump is both extensible, and flexible- meaning it is possible to configure Tyk-Pump to send data to multiple different backends at the same time as depicted by Pump Backends (i) and (ii), MongoDB and Elasticsearch respectively in Figure 1. Tyk-Pump is scalable, both horizontally and vertically, as indicated by Instances "1", "2", and "n". Additionally, it is possible to apply filters that dictate WHAT analytics go WHERE, please see the [docs on sharded analytics configuration here](/api-management/tyk-pump#configuring-the-sharded-analytics). + +| Configuration and Scaling of Tyk Pump | +| :-- | +| Figure 1: An architecture diagram illustrating horizontal scaling of "n" Instances of Tyk-Pump each with two different backends. 
| + +### Other Supported Backend Services + +We list our [supported backends here](/api-management/tyk-pump#external-data-stores). + +### Configuring your Tyk Pump + +See [Tyk Pump Configuration](/api-management/tyk-pump#tyk-pump-configuration) for more details on setting up your Tyk Pump. + +Tyk Pump can be horizontally scaled without causing duplicate data, please see the following Table for the supported permutations of Tyk Pump scaling. + +| Supported | Summary | +| :-- | :-- | +| βœ… | Single Pump Instance, Single Backend | +| βœ… | Single Pump Instance, Multiple Backend(s) | +| βœ… | Multiple Pump Instances, Same Backend(s)| +| ❌ | Multiple Pump Instances, Different Backend(s) | + +## Getting Started + +### Tyk Pump Configuration + +The Tyk Pump is our Open Source analytics purger that moves the data generated by your Tyk nodes to any back-end. By moving the analytics into your supported database, it allows the Tyk Dashboard to display traffic analytics across all your Tyk Gateways. + +#### Tyk Dashboard + +##### MongoDB + +The Tyk Dashboard uses the `mongo-pump-aggregate` collection to display analytics. This is different than the standard `mongo` pump plugin that will store individual analytic items into MongoDB. The aggregate functionality was built to be fast, as querying raw analytics is expensive in large data sets. See [Pump Dashboard Config](/api-management/tyk-pump#setup-dashboard-analytics) for more details. + +##### SQL + + + +Tyk no longer supports SQLite as of Tyk 5.7.0. To avoid disruption, please transition to [PostgreSQL](/planning-for-production/database-settings#postgresql), [MongoDB](/planning-for-production/database-settings#mongodb), or one of the listed compatible alternatives. + + + +In v4.0 of the Tyk Dashboard, we added support for the following SQL platforms: +- PostgreSQL +- SQLite + +Within your Dashboard configuration file (`tyk-analytics.conf`) there is now a `storage` section. + +```{.shell} +{ + ... 
+ "storage": { + "main":{}, + "analytics":{}, + "logs":{}, + "uptime": {} + } +} +``` +###### Field description + +- `main` - Main storage (APIs, Policies, Users, User Groups, etc.) +- `analytics` - Analytics storage (used for display all the charts and for all analytics screens) +- `logs` - Logs storage (log browser page) +- `uptime` - uptime tests analytics data + +###### Common settings + +For every `storage` section, you must populate the following fields: +```{.shell} +{ +... + "storage": { + ... + "main": { + "type": "postgres", + "connection_string": "user=root password=admin database=tyk-demo-db host=tyk-db port=5432", + } + } +} +``` +- `type` use this field to define your SQL platform (currently SQLite or PostgreSQL are supported) +- `connection_string` the specific connection settings for your platform + +The pump needed for storing logs data in the database, is very similar to other pumps as well as the storage setting in your Tyk Dashboard config. It just requires the `sql` name and database specific configuration options. + +####### SQL example + +```{.shell} +"sql": { + "name": "sql", + "meta": { + "type": "postgres", + "connection_string": "user=laurentiughiur password=test123 database=tyk-demo-db host=127.0.0.1 port=5432" + } +}, +``` + +#### Capping analytics data + +Tyk Gateways can generate a lot of analytics data. Be sure to read about [capping your Dashboard analytics](/api-management/tyk-pump#tyk-pump-capping-analytics-data-storage) + +#### Omitting the configuration file + +From Tyk Pump 1.5.1+, you can configure an environment variable to omit the configuration file with the `TYK_PMP_OMITCONFIGFILE` variable. +This is specially useful when using Docker, since by default, the Tyk Pump has a default configuration file with pre-loaded pumps. + +#### Sharding analytics to different data sinks + +In a multi-organization deployment, each organization, team, or environment might have their preferred analytics tooling. 
This capability allows the Tyk Pump to send analytics for different organizations or various APIs to different destinations.
+E.g. Org A can send their analytics to MongoDB + DataDog
+while Org B can send their analytics to DataDog + expose the Prometheus metrics endpoint.
+
+##### Configuring the sharded analytics
+
+You can achieve the sharding by setting both an allowlist and a blocklist, meaning that some data sinks can receive information for all orgs, whereas other data sinks will not receive certain organization's analytics if it was block listed.
+
+This feature makes use of the field called `filters`, which can be defined per pump. This is its structure:
+```
+"filters":{
+ "api_ids":[],
+ "org_ids":[],
+ "skip_api_ids":[],
+ "skip_org_ids":[]
+ }
+```
+- `api_ids` and `org_ids` works as allow list (APIs and orgs where we want to send the analytic records).
+- `skip_api_ids` and `skip_org_ids` works as block list (APIs and orgs where we want to filter out and not send their analytic records).
+
+The priority is always a blocklist over an allowlist.
+
+An example of configuration would be:
+ ```
+"csv": {
+ "type": "csv",
+ "filters": {
+ "org_ids": ["org1","org2"]
+ },
+ "meta": {
+ "csv_dir": "./bar"
+ }
+},
+"elasticsearch": {
+ "type": "elasticsearch",
+ "filters": {
+ "skip_api_ids": ["api_id_1"],
+ },
+ "meta": {
+ "index_name": "tyk_analytics",
+ "elasticsearch_url": "https://elasticurl:9243",
+ "enable_sniffing": false,
+ "document_type": "tyk_analytics",
+ "rolling_index": false,
+ "extended_stats": false,
+ "version": "6"
+ }
+}
+```
+With this configuration, all the analytics records related to `org1` or `org2` will go to the `csv` backend and everything but analytics records from `api_id_1` to `elasticsearch`.
+
+
+### Setup Dashboard Analytics
+
+To enable [Dashboard Analytics](/api-management/dashboard-configuration#traffic-analytics), you would need to configure Tyk Pump to send analytic data to the Dashboard storage MongoDB / SQL. 
+ +These are the different pumps that handle different kinds of analytic data. + +| Analytics | Activities Graph | Log Browser | Uptime Analytics | +| :---------------------------- | :-------------------- | :-------------------- | :---------------- | +| Mongo (Multi organization) | Mongo Aggregate Pump | Mongo Selective Pump | Uptime Pump | +| Mongo (Single organization) | Mongo Aggregate Pump | Mongo Pump | Uptime Pump | +| SQL | SQL Aggregate Pump | SQL Pump | Uptime Pump | + +See below details about these pumps, their configs, matching collections and relevant dashboard setting, to view this data. + +#### MongoDB + +##### Mongo Pump + +**`mongo`** Pump simply saves all individual requests across every organization to a collection called **`tyk_analytics`**. Each request will be stored as a single document. + +###### Pump Config + +```yaml +{ + ... + "pumps": { + "mongo": { + "type": "mongo", + "meta": { + "collection_name": "tyk_analytics", + "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}" + } + } +} +``` + +###### Capping +This collection [should be capped](/api-management/tyk-pump#capping-analytics-data) due to the number of individual documents. This is especially important if the `detailed_recording` in the Gateway is turned on which means that the Gateway records the full payload of the request and response. + +###### Omitting Indexes +From Pump 1.6+, the Mongo Pumps indexes default behavior is changed and the new configuration option `omit_index_creation` is available. This option is applicable to the following Pumps: `Mongo Pump`,`Mongo Aggregate Pump` and `Mongo Selective Pump`. + +The behavior now depends upon the value of 'omit_index_creation' and the Pump in use, as follows: + +- If `omit_index_creation` is set to `true`, tyk-pump will not create any indexes (for Mongo pumps). +- If `omit_index_creation` is set to `false` (default) and you are using `DocumentDB`, tyk-pump will create the Mongo indexes. 
+- If `omit_index_creation` is set to `false` (default) and you are using `MongoDB`, the behavior of tyk-pump depends upon whether the collection already exists: + - If the collection exists, tyk-pump will not create the indexes again. + - If the collection does not already exist, tyk-pump will create the indexes. + +###### Dashboard Setting + +In **API Usage Data > Log Browser** screen you will see all the individual requests that the Gateway has recorded and saved in `tyk_analytics` collection using the `mongo` pump. + +Because you have the option to store and display analytics of every organization or separately per organization, you need to configure the Tyk Dashboard with the matching setting according to the way you set the pump to store the data in MongoDB. +The field [use_sharded_analytics](/tyk-dashboard/configuration#use_sharded_analytics) controls the collection that the dashboard will query. +- If `use_sharded_analytics: false` - the dashboard will query the collection `tyk_analytics` that mongo pump populated +- If `use_sharded_analytics: true` - the dashboard will query the collection that `mongo-pump-selective` pump populated + + + +##### Mongo Aggregate Pump + +**`mongo-pump-aggregate`** pump stores data in a collection called `**z_tyk_analyticz_aggregate_{ORG ID}**`. + +###### Pump Config + +```yaml +{ + ... + "pumps": { + "mongo-pump-aggregate": { + "name": "mongo-pump-aggregate", + "meta": { + "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}", + "use_mixed_collection": true + } + } + } +} +``` + +- `use_mixed_collection: true` - will store analytics to **both** your organization defined collections `z_tyk_analyticz_aggregate_{ORG ID}` and your org-less `tyk_analytics_aggregates` collection. +- `use_mixed_collection: false`- your pump will only store analytics to your org defined collection. + +`tyk_analytics_aggregates` collection is used to query analytics across your whole Tyk setup. 
This can be used, for example, by a superuser role that is not attached to an organization. When set to `true`, you also need to set [use_sharded_analytics](/tyk-dashboard/configuration#use_sharded_analytics) to true in your Dashboard config. + + +###### Dashboard Setting + +This pump supplies the data for the following sub categories **`API Usage Data`**: + +* Activity by API screen +* Activity by Key screen +* Errors screen + +As with the regular analytics, because Tyk gives you the option to store and display aggregated analytics across all organizations or separately per organization, you need to configure the Tyk Dashboard with the matching setting according to the way to set the pump to store the data in MongoDB, otherwise, you won't see the data in the Dashboard. + +1. The [enable_aggregate_lookups: true](/tyk-dashboard/configuration#enable_aggregate_lookups) field must be set in the Dashboard configuration file, in order for the Dashboard to query and display the aggregated data that `mongo-pump-aggregate` saved to MongoDB. + +###### Capping +As a minimal number of documents get stored, you don't need to worry about capping this. The documents contain aggregate info across an individual API, such as total requests, errors, tags and more. + +####### High Traffic Environment Settings + +If you have a high traffic environment, and you want to ignore aggregations to avoid Mongo overloading and/or reduce aggregation documents size, you can do it using the `ignore_aggregations` configuration option. The possible values are: +* APIID +* Errors +* Versions +* APIKeys +* OauthIDs +* Geo +* Tags +* Endpoints +* KeyEndpoint +* OauthEndpoint +* ApiEndpoint + +For example, if you want to ignore the API Keys aggregations: +```yaml +pump.conf: + +{ + ... 
+ "pumps": { + "mongo-pump-aggregate": { + "name": "mongo-pump-aggregate", + "meta": { + "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}", + "use_mixed_collection": true, + "ignore_aggregations": ["APIKeys"] + } + } + } +} +``` + +####### Unique Aggregation Points + +In case you set your API definition in the Tyk Gateway to tag unique headers (like `request_id` or timestamp), this collection can grow a lot since aggregation of unique values simply creates a record/document for every single value with a counter of 1. To mitigate this, avoid tagging unique headers as the first option. If you can't change the API definition quickly, you can add the tag to the ignore list `"ignore_aggregations": ["request_id"]`. This ensures that Tyk pump does not aggregate per `request_id`. +Also, if you are not sure what's causing the growth of the collection, you can also set time capping on these collections and monitor them. + + +##### Mongo Selective Pump + +**`mongo-pump-selective`** pump stores individual requests per organization in collections called **`z_tyk_analyticz_{ORG ID}`**. +Similar to the regular `mongo` pump, Each request will be stored as a single document. + +###### Pump Config + +This collection [should be capped](/api-management/tyk-pump#tyk-pump-capping-analytics-data-storage) due to the number of individual documents. +```yaml +{ + ... + "pumps": { + "mongo-pump-selective": { + "name": "mongo-pump-selective", + "meta": { + "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}", + "use_mixed_collection": true + } + } + } +} +``` + +###### Capping + +This collection [should be capped](/api-management/tyk-pump#tyk-pump-capping-analytics-data-storage) due to the number of individual documents. 
+ +###### Dashboard Setting + +As with the regular analytics, if you are using the Selective pump, you need to set `use_sharded_keys: true` in the dashboard config file so it will query `z_tyk_analyticz_{ORG ID}` collections to populate the `Log Browser`. + +##### Uptime Tests Analytics + +###### Pump Configuration + +```yaml +"uptime_pump_config": { + "collection_name": "tyk_uptime_analytics", + "mongo_url": "mongodb://tyk-mongo:27017/tyk_analytics", + }, +``` + +###### Tyk Dashboard Configuration + +```yaml + "storage" : { + ... + "uptime": { + "type": "postgres", + "connection_string": "user=root password=admin database=tyk-demo-db host=tyk-db port=5432", + } + } +} +``` + +###### Tyk Gateway Setting + +To enable Uptime Pump, modify gateway configuration [enable_uptime_analytics](/tyk-oss-gateway/configuration#uptime_testsconfigenable_uptime_analytics) to true. + +#### SQL + +When using one of our [supported SQL platforms](/api-management/dashboard-configuration#supported-database), Tyk offers 3 types of SQL pumps: + +1. Aggregated Analytics: `sql_aggregate` +2. Raw Logs Analytics: `sql` +3. Uptime Tests Analytics + +In a production environment, we recommend sharding. You can configure your analytics in the following ways: + +* Sharding **raw logs** +* Sharding **aggregated analytics** +* Sharding **uptime tests** + +##### SQL Pump + +While aggregated analytics offer a decent amount of details, there are use cases when you'd like to have access to all request details in your analytics. For that you can generate analytics based on raw logs. This is especially helpful when, once you have all the analytics generated based on raw logs stored in your SQL database, you can then build your own custom metrics, charts etc. outside of your Tyk Dashboard, which may bring more value to your product. + +The pump needed for storing log data in the database is very similar to other pumps as well as the storage setting in the Tyk Dashboard config.
It just requires the SQL name and database-specific configuration options. + +###### SQL Pump Configuration + +For storing logs into the `tyk_analytics` database table. + +```yaml +"sql": { + "name": "sql", + "meta": { + "type": "postgres", + "connection_string": "host=localhost port=5432 user=admin dbname=postgres_test password=test", + "table_sharding": false + } +} +``` +`type` - The supported types are `sqlite` and `postgres`. + +`connection_string` - Specifies the connection string to the database. For example, for `sqlite` it will be the path/name of the database, and for `postgres`, specifying the host, port, user, password, and dbname. + +`log_level` - Specifies the SQL log verbosity. The possible values are: `info`,`error` and `warning`. By default, the value is `silent`, which means that it won't log any SQL query. + +`table_sharding` - Specifies if all the analytics records are going to be stored in one table or in multiple tables (one per day). By default, it is set to `false`. + +If `table_sharding` is `false`, all the records are going to be stored in the `tyk_analytics` table. If set to `true`, daily records are stored in a `tyk_analytics_YYYYMMDD` date formatted table. + +###### Dashboard Setting + +In the **API Usage Data > Log Browser** screen you will see all the individual requests that the Gateway has recorded and saved in `tyk_analytics` collection using the `sql` pump. + +Make sure you have configured the dashboard with your SQL database connection settings: + +```yaml +{ + ... + "storage" : { + ... + "analytics": { + "type": "postgres", + "connection_string": "user=root password=admin host=tyk-db database=tyk-demo-db port=5432", + } + } +} +``` + +##### SQL Aggregate Pump + +This is the default option offered by Tyk, because it is configured to store the most important analytics details which will satisfy the needs of most of our clients. This allows your system to save database space and reporting is faster, consuming fewer resources. 
+ +###### SQL Aggregate Pump Configuration + +For storing logs into the `tyk_aggregated` database table. + +```yaml +"sql_aggregate": { + "name": "sql_aggregate", + "meta": { + "type": "postgres", + "connection_string": "host=localhost port=5432 user=admin dbname=postgres_test password=test", + "table_sharding": true + } +} +``` + +`type` - The supported types are `sqlite` and `postgres`. + +`connection_string` - Specifies the connection string to the database. For example, for `sqlite` it will be the path/name of the database, and for `postgres`, specifying the host, port, user, password, and dbname. + +`log_level` - Specifies the SQL log verbosity. The possible values are: `info`, `error`, and `warning`. By default, the value is `silent`, which means that it won't log any SQL query. + +`track_all_paths` - Specifies if it should store aggregated data for all the endpoints. By default, it is set to `false`, which means that it only stores aggregated data for `tracked endpoints`. + +`ignore_tag_prefix_list` - Specifies prefixes of tags that should be ignored. + +`table_sharding` - Specifies if all the analytics records are going to be stored in one table or in multiple tables (one per day). By default, it is set to `false`. + +If `table_sharding` is `false`, all the records are going to be stored in the `tyk_aggregated` table. If set to `true`, daily records are stored in a `tyk_aggregated_YYYYMMDD` date formatted table. + +###### Dashboard Setting + +This pump supplies the data for the following sub categories **`API Usage Data`**: + +* Activity by API screen +* Activity by Key screen +* Errors screen + +As with the regular analytics, because Tyk gives you the option to store and display aggregated analytics across all organizations or separately per organization, you need to configure the Tyk Dashboard with the matching setting according to the way you set the pump to store the data in SQL, otherwise, you won't see the data in the Dashboard. + +1.
The [enable_aggregate_lookups: true](/tyk-dashboard/configuration#enable_aggregate_lookups) field must be set in the Dashboard configuration file, in order for the Dashboard to query and display the aggregated data that `sql-aggregate` saved to the database. + +2. Make sure you have configured the dashboard with your SQL database connection settings: + +```yaml +{ + ... + "storage": { + ... + "analytics": { + "type": "postgres", + "connection_string": "user=root password=admin host=tyk-db database=tyk-demo-db port=5432", + } + } +} +``` + +##### SQL Uptime Pump + +In an `uptime_pump_config` section, you can configure a SQL uptime pump. To do that, you need to add the field `uptime_type` with `sql` value. + +```yaml +"uptime_pump_config": { + "uptime_type": "sql", + "type": "postgres", + "connection_string": "host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw", + "table_sharding": false +}, +``` +`type` - The supported types are `sqlite` and `postgres`. + +`connection_string` - Specifies the connection string to the database. For example, for `sqlite` it will be the path/name of the database, and for `postgres`, specifying the host, port, user, password, and dbname. + +`table_sharding` - Specifies if all the analytics records will be stored in one table or multiple tables (one per day). By default, it is set to `false`. + +If `table_sharding` is `false`, all the records will be stored in the `tyk_analytics` table. If set to `true`, daily records are stored in a `tyk_analytics_YYYYMMDD` date formatted table. + +###### Tyk Dashboard Configuration + +You need to set `enable_aggregate_lookups` to `false`. + +Then add your SQL database connection settings: + +```yaml +{ + ... + "storage" : { + ...
+ "analytics": { + "type": "postgres", + "connection_string": "user=root password=admin host=tyk-db database=tyk-demo-db port=5432", + } + } +} +``` + +##### Uptime Tests Analytics + +###### Tyk Pump Configuration + +For storing logs into the `tyk_aggregated` database table. + +```yaml +"uptime_pump_config": { + "uptime_type": "sql", + "type": "postgres", + "connection_string": "host=sql_host port=sql_port user=sql_usr database=tyk-demo-db password=sql_pw", +}, +``` + +###### Tyk Dashboard Configuration + +```{.shell} + "storage" : { + ... + "uptime": { + "type": "postgres", + "connection_string": "user=root password=admin database=tyk-demo-db host=tyk-db port=5432", + } + } +} +``` + +###### Tyk Gateway Setting + +To enable Uptime Pump, modify gateway configuration [enable_uptime_analytics](/tyk-oss-gateway/configuration#uptime_testsconfigenable_uptime_analytics) to true. + +##### Sharding + +In a production environment, we recommend the following setup: + +By default, all logs/analytics are stored in one database table, making it hard and less performant to execute CRUD operations on the dataset when it grows significantly. + +To improve the data maintenance processes, as querying or removing data from one single table is slow, we have added a new option (`table_sharding`), so that the data can be stored daily (one table of data per day), which will automatically make querying or removing sets of data easier, whether dropping tables for removing logs/analytics, or reading multiple tables based on the selected period. + +###### Tyk Pump Configuration + +```yaml +"sql": { + ... + "meta": { + ... + "table_sharding": true + } +}, +"sql_aggregate" : { + ... + "meta": { + ... + "table_sharding": true + } +}, +"uptime_pump_config": { + ... + "table_sharding": true +}, +``` + +###### Tyk Dashboard Configuration + +```yaml + "storage": { + "main": { + ... + "table_sharding": true + }, + "analytics": { + ... + "table_sharding": true + }, + "logs": { + ...
+ "table_sharding": true + }, + "uptime": { + ... + "table_sharding": true + } + }, +``` + +### Graph Pump setup + +#### MongoDB + +Starting with version `1.7.0` of Tyk Pump and version `4.3.0` of Tyk Gateway it is possible to configure Graph MongoDB Pump. Once configured, the pump enables support for Graphql-specific metrics. The Graphql-specific metrics currently supported include (more to be added in future versions): + +* Types Requested. +* Fields requested for each type. +* Error Information (not limited to HTTP status codes). + +##### Setting up Graph MongoDB Pump + +1. Set `enable_analytics` to `true` in your `tyk.conf`. +2. Enable Detailed recording by setting `enable_detailed_recording` in your `tyk.conf` to `true`. This is needed so that the GraphQL information can be parsed from the request body and response. + + + + + This will enable detailed recording globally, across all APIs. This means that the behavior of individual APIs that have this configuration parameter set will be overridden. The Gateway must be restarted after updating this configuration parameter. + + + +3. Set up your Mongo `collection_name`. +4. Add your Graph MongoDB Pump configuration to the list of pumps in your `pump.conf` (pump configuration file). + +Sample setup: + +``` +{ + ... + "pumps": { + ... + "mongo-graph": { + "meta": { + "collection_name": "tyk_graph_analytics", + "mongo_url": "mongodb://mongo/tyk_graph_analytics" + } + }, + } +} +``` + +##### Current limitations + +The Graph MongoDB Pump is being improved upon regularly and as such there are a few things to note about the Graph MongoDB Pump current behavior: + +* Size of your records - due to the detailed recording being needed for this Pump to function correctly, it is important to note that your records and consequently, your MongoDB storage could increase in size rather quickly.
+* Subgraph requests are not recorded - Requests to tyk-controlled subgraphs from supergraphs in federation setting are currently not recorded by the Graph MongoDB Pump, just the supergraph requests are handled by the Graph MongoDB Pump. +* UDG requests are recorded but subsequent requests to data sources are currently ignored. +* Currently, Graph MongoDB Pump data can not be used in Tyk Dashboard yet, the data is only stored for recording purposes at the moment and can be exported to external tools for further analysis. + +#### SQL + +Starting with Version `1.8.0` of Tyk Pump and version `5.0.0` of the Tyk Gateway, it is possible to export GraphQL analytics to an SQL database. + +##### Setting up Graph SQL Pump + +The Graph SQL pump currently includes information (per request) like: +- Types Requested +- Fields requested for each type +- Error Information +- Root Operations Requested. + + Setup steps include: +1. Set `enable_analytics` to `true` in your `tyk.conf`. +2. Enable Detailed recording by setting `enable_detailed_recording` in your `tyk.conf` to `true`. This is needed so that the GraphQL information can be parsed from the request body and response. + + + + + This will enable detailed recording globally, across all APIs. This means that the behavior of individual APIs that have this configuration parameter set will be overridden. The Gateway must be restarted after updating this configuration parameter. + + + +3. Configure your `pump.conf` using this sample configuration: +``` +"sql-graph": { + "meta": { + "type": "postgres", + "table_name": "tyk_analytics_graph", + "connection_string": "host=localhost user=postgres password=password dbname=postgres", + "table_sharding": false + } +}, +``` +The Graph SQL pump currently supports `postgres`, `sqlite` and `mysql` databases. The `table_name` refers to the table that will be created in the case of unsharded setups, and the prefix that will be used for sharded setups +e.g. `tyk_analytics_graph_20230327`.
+ +>The Graph SQL pump currently has the same limitations as the Graph Mongo Pump. + +##### Setting up Graph SQL Aggregate Pump +The `sql-graph-aggregate` can be configured similar to the Graph SQL pump: +``` + "sql-graph-aggregate": { + "meta": { + "type": "postgres", + "connection_string": "host=localhost port=5432 user=postgres dbname=postgres password=password", + "table_sharding": false + } +} +``` + + +## External Data Stores + +The Tyk Pump component takes all of the analytics in Tyk and moves the data from the Gateway into your Dashboard. It is possible to set it up to send the analytics data it finds to other data stores. Currently we support the following: + +- MongoDB or SQL (Used by the Tyk Dashboard) +- [CSV](/api-management/tyk-pump#csv) +- [Elasticsearch (2.0 - 7.x)](/api-management/tyk-pump#elasticsearch) +- Graylog +- Resurface.io +- InfluxDB +- [Moesif](/api-management/tyk-pump#moesif) +- [Splunk](/api-management/tyk-pump#splunk) +- StatsD +- DogStatsD +- Hybrid (Tyk RPC) +- [Prometheus](/api-management/tyk-pump#monitor-your-apis-with-prometheus) +- [Logz.io](/api-management/tyk-pump#logzio) +- Kafka +- Syslog (FluentD) + +See the [Tyk Pump Configuration](/api-management/tyk-pump) for more details. + +### CSV + +Tyk Pump can be configured to create or modify a CSV file to track API Analytics. + +#### JSON / Conf file + +Add the following configuration fields to the pumps section within your `pump.conf` file: + +```json +{ + "csv": + { + "type": "csv", + "meta": { + "csv_dir": "./your_directory_here" + } + } +} +``` + +#### Environment variables +```bash +TYK_PMP_PUMPS_CSV_TYPE=csv +TYK_PMP_PUMPS_CSV_META_CSVDIR=./your_directory_here +``` + +### Datadog + +The Tyk Pump can be configured to send your API traffic analytics to [Datadog](https://www.datadoghq.com/) with which you can build a [dashboards](https://docs.datadoghq.com/integrations/tyk/#dashboards) with various metrics based on your API traffic in Tyk. 
+ +#### Datadog dashboard example + +We created a default Tyk dashboard canvas to give our users an easier starting point. You can find it in Datadog portal, under the `Dashboards --> lists` section, [https://app.datadoghq.com/dashboard/lists](https://app.datadoghq.com/dashboard/lists), and it is called `Tyk Analytics Canvas`. To use this dashboard you will need to make sure that your datadog agent deployment has the following tag `env:tyk-demo-env` and that your Tyk Pump configuration has `dogstatsd.meta.namespace` set to `pump`. You can also import it from [Datadog official GH repo](https://github.com/DataDog/integrations-extras/blob/master/tyk/assets/dashboards/tyk_analytics_canvas.json) and change those values in the dashboard itself to visualize your analytics data as it flows into Datadog. + +Sample Datadog dashboard + + +#### Prerequisites + +- A working Datadog agent installed on your Environment. See the [Datadog Tyk integration docs](https://docs.datadoghq.com/integrations/tyk/) for more information. +- Either a [Tyk Pro install](/tyk-self-managed/install) or [Tyk OSS Gateway install](/apim/open-source/installation) along with a [Tyk Pump](/api-management/tyk-pump) install. + +#### How it works + +When running the Datadog Agent, [DogstatsD](https://github.com/TykTechnologies/tyk-pump#dogstatsd) gets the [request_time](https://docs.datadoghq.com/integrations/tyk/#data-collected) metric from your Tyk Pump in real time, per request, so you can understand the usage of your APIs and get the flexibility of aggregating by various parameters such as date, version, returned code, method etc.
+ +#### Tyk Pump configuration + +Below is a sample DogstatsD section from a Tyk `pump.conf` file + +```json +"dogstatsd": { + "type": "dogstatsd", + "meta": { + "address": "dd-agent:8126", + "namespace": "tyk", + "async_uds": true, + "async_uds_write_timeout_seconds": 2, + "buffered": true, + "buffered_max_messages": 32, + "sample_rate": 0.9999999999, + "tags": [ + "method", + "response_code", + "api_version", + "api_name", + "api_id", + "org_id", + "tracked", + "path", + "oauth_id" + ] + } +}, +``` + +##### Field descriptions + +- `address`: address of the datadog agent including host & port +- `namespace`: prefix for your metrics to datadog +- `async_uds`: Enable async [UDS over UDP](https://github.com/Datadog/datadog-go#unix-domain-sockets-client) +- `async_uds_write_timeout_seconds`: Integer write timeout in seconds if `async_uds: true` +- `buffered`: Enable buffering of messages +- `buffered_max_messages`: Max messages in single datagram if `buffered: true`. Default 16 +- `sample_rate`: default 1 which equates to 100% of requests. To sample at 50%, set to 0.5 +- `tags`: List of tags to be added to the metric. The possible options are listed in the below example + +If no tag is specified the fallback behavior is to use the below tags: +- `path` +- `method` +- `response_code` +- `api_version` +- `api_name` +- `api_id` +- `org_id` +- `tracked` +- `oauth_id` + +Note that this configuration can generate significant data due to the unbound nature of the `path` tag. + + +On startup, you should see the loaded configs when initialising the DogstatsD pump +```console +[May 10 15:23:44] INFO dogstatsd: initializing pump +[May 10 15:23:44] INFO dogstatsd: namespace: pump.
+[May 10 15:23:44] INFO dogstatsd: sample_rate: 50% +[May 10 15:23:44] INFO dogstatsd: buffered: true, max_messages: 32 +[May 10 15:23:44] INFO dogstatsd: async_uds: true, write_timeout: 2s +``` + + +### Elasticsearch + +[Elasticsearch](https://www.elastic.co/) is a highly scalable and distributed search engine that is designed to handle large amounts of data. + + +#### JSON / Conf + +Add the following configuration fields to the pumps section within your `pump.conf` file: + +```json +{ + "pumps": { + "elasticsearch": { + "type": "elasticsearch", + "meta": { + "index_name": "tyk_analytics", + "elasticsearch_url": "http://localhost:9200", + "enable_sniffing": false, + "document_type": "tyk_analytics", + "rolling_index": false, + "extended_stats": false, + "version": "6" + } + } + } +} +``` + +#### Configuration fields +- `index_name`: The name of the index that all the analytics data will be placed in. Defaults to `tyk_analytics` +- `elasticsearch_url`: If sniffing is disabled, the URL that all data will be sent to. Defaults to `http://localhost:9200` +- `enable_sniffing`: If sniffing is enabled, the `elasticsearch_url` will be used to make a request to get a list of all the nodes in the cluster, the returned addresses will then be used. Defaults to `false` +- `document_type`: The type of the document that is created in Elasticsearch. Defaults to `tyk_analytics` +- `rolling_index`: Appends the date to the end of the index name, so each days data is split into a different index name. For example, `tyk_analytics-2016.02.28`. Defaults to `false`. +- `extended_stats`: If set to true will include the following additional fields: `Raw Request`, `Raw Response` and `User Agent`. +- `version`: Specifies the ES version. Use `3` for ES 3.X, `5` for ES 5.X, `6` for ES 6.X, `7` for ES 7.X . Defaults to `3`. +- `disable_bulk`: Disable batch writing. Defaults to `false`. +- `bulk_config`: Batch writing trigger configuration. 
Each option is an OR with each other: + - `workers`: Number of workers. Defaults to `1`. + - `flush_interval`: Specifies the time in seconds to flush the data and send it to ES. Default is disabled. + - `bulk_actions`: Specifies the number of requests needed to flush the data and send it to ES. Defaults to 1000 requests. If it is needed, can be disabled with `-1`. + - `bulk_size`: Specifies the size (in bytes) needed to flush the data and send it to ES. Defaults to 5MB. Can be disabled with `-1`. + + +#### Environment variables +```bash +TYK_PMP_PUMPS_ELASTICSEARCH_TYPE=elasticsearch +TYK_PMP_PUMPS_ELASTICSEARCH_META_INDEXNAME=tyk_analytics +TYK_PMP_PUMPS_ELASTICSEARCH_META_ELASTICSEARCHURL=http://localhost:9200 +TYK_PMP_PUMPS_ELASTICSEARCH_META_ENABLESNIFFING=false +TYK_PMP_PUMPS_ELASTICSEARCH_META_DOCUMENTTYPE=tyk_analytics +TYK_PMP_PUMPS_ELASTICSEARCH_META_ROLLINGINDEX=false +TYK_PMP_PUMPS_ELASTICSEARCH_META_EXTENDEDSTATISTICS=false +TYK_PMP_PUMPS_ELASTICSEARCH_META_VERSION=5 +TYK_PMP_PUMPS_ELASTICSEARCH_META_BULKCONFIG_WORKERS=2 +TYK_PMP_PUMPS_ELASTICSEARCH_META_BULKCONFIG_FLUSHINTERVAL=60 +``` + +### Moesif + +This is a step by step guide to setting up [Moesif API Analytics and Monetization platform](https://www.moesif.com/solutions/track-api-program?language=tyk-api-gateway&utm_medium=docs&utm_campaign=partners&utm_source=tyk) to understand [customer API usage](https://www.moesif.com/features/api-analytics?utm_medium=docs&utm_campaign=partners&utm_source=tyk) and [setup usage-based billing](https://www.moesif.com/solutions/metered-api-billing?utm_medium=docs&utm_campaign=partners&utm_source=tyk). + +We also have a [blog post](https://tyk.io/blog/tyk-moesif-the-perfect-pairing/) which highlights how Tyk and Moesif work together. + +The assumptions are that you have Docker installed and Tyk Self-Managed already running. +See the [Tyk Pump Configuration](/api-management/tyk-pump) for more details. 
+ + +#### Overview + +With the Moesif Tyk plugin, your API logs are sent to Moesif asynchronously to provide analytics on customer API usage along with your API payloads like JSON and XML. This plugin also enables you to monetize your API with [billing meters](https://www.moesif.com/solutions/metered-api-billing?utm_medium=docs&utm_campaign=partners&utm_source=tyk) and provide a self-service onboarding experience. Moesif also collects information such as the authenticated user (AliasId or OAuthId) to identify customers using your API. An overview on how Moesif and Tyk work together is [available here](https://tyk.io/blog/tyk-moesif-the-perfect-pairing/). + +#### Steps for Configuration + +1. **Get a Moesif Application Id** + + Go to [www.moesif.com](https://www.moesif.com/?language=tyk-api-gateway) and sign up for a free account. + Application Ids are write-only API keys specific to an application in Moesif such as "Development" or "Production". You can always create more applications in Moesif. + +2. **Enable Moesif backend in Tyk Pump** + + Add Moesif as an analytics backend along with your Moesif Application Id you obtained in the last step to your [Tyk Pump](https://github.com/TykTechnologies/tyk-pump) Configuration + +####### JSON / Conf File +```json +{ + "pumps": { + "moesif": { + "name": "moesif", + "meta": { + "application_id": "Your Moesif Application Id" + } + } + } +} +``` + +####### Env Variables: +``` +TYK_PMP_PUMPS_MOESIF_TYPE=moesif +TYK_PMP_PUMPS_MOESIF_META_APPLICATIONID=your_moesif_application_id +``` + +3.
**Ensure analytics is enabled** + +If you want to log HTTP headers and body, ensure that [detailed analytics recording](/api-management/logs-metrics#capturing-detailed-logs) is enabled true in your [Tyk Gateway Conf](/tyk-oss-gateway/configuration) + +####### JSON / Conf File + +```json +{ + "enable_analytics" : true, + "analytics_config": { + "enable_detailed_recording": true + } +} +``` + +####### Env Variables: +```conf +TYK_GW_ENABLEANALYTICS=true +TYK_GW_ANALYTICSCONFIG_ENABLEDETAILEDRECORDING=true +``` + + +This will enable detailed recording globally, across all APIs. This means that the behavior of individual APIs that have this configuration parameter set will be overridden. The Gateway must be restarted after updating this configuration parameter. + + + +4. **Restart Tyk Pump to pickup the Moesif config** + +Once your config changes are done, you need to restart your Tyk Pump and Tyk Gateway instances (if you've modified Tyk gateway config). +If you are running Tyk Pump in Docker: + +`$ docker restart tyk-pump` + +5. **PROFIT!** + +You can now make a few API calls and verify they show up in Moesif. + +```bash +$ curl localhost:8080 +``` +Step5 + +The Moesif Tyk integration automatically maps a [Tyk Token Alias](https://tyk.io/blog/simpler-usage-tracking-token-aliases-tyk-cloud/) to a user id in Moesif. With a Moesif SDK, you can store additional customer demographics to break down API usage by customer email, company industry, and more. + +#### Configuration options + +The Tyk Pump for Moesif has a few configuration options that can be set in your `pump.env`: + +|Parameter|Required|Description|Environment Variable| +| :--------- | :--------- | :----------- | :----------- | +|application_id|required|Moesif Application Id. Multiple Tyk api_id's will be logged under the same app id.|TYK_PMP_PUMPS_MOESIF_META_APPLICATIONID| +|request_header_masks|optional|Mask a specific request header field. 
Type: String Array [] string|TYK_PMP_PUMPS_MOESIF_META_REQUESTHEADERMASKS| +|request_body_masks|optional|Mask a specific - request body field. Type: String Array [] string| TYK_PMP_PUMPS_MOESIF_META_REQUESTBODYMASKS | +|response_header_masks|optional|Mask a specific response header field. Type: String Array [] string|TYK_PMP_PUMPS_MOESIF_META_RESPONSEHEADERMASKS| +|response_body_masks|optional|Mask a specific response body field. Type: String Array [] string|TYK_PMP_PUMPS_MOESIF_META_RESPONSEBODYMASKS| +|disable_capture_request_body|optional|Disable logging of request body. Type: Boolean. Default value is false.|TYK_PMP_PUMPS_MOESIF_META_DISABLECAPTUREREQUESTBODY| +|disable_capture_response_body|optional|Disable logging of response body. Type: Boolean. Default value is false.|TYK_PMP_PUMPS_MOESIF_META_DISABLECAPTURERESPONSEBODY| +|user_id_header|optional|Field name to identify User from a request or response header. Type: String. Default maps to the token alias|TYK_PMP_PUMPS_MOESIF_META_USERIDHEADER| +|company_id_header|optional|Field name to identify Company (Account) from a request or response header. Type: String|TYK_PMP_PUMPS_MOESIF_META_COMPANYIDHEADER| + +#### Identifying users +By default, the plugin will collect the authenticated user (AliasId or OAuthId) to identify the customer. This can be overridden by setting the `user_id_header` to a header that contains your API user/consumer id such as `X-Consumer-Id`. You can also set the `company_id_header` which contains the company to link the user to. [See Moesif docs on identifying customers](https://www.moesif.com/docs/getting-started/identify-customers/?utm_medium=docs&utm_campaign=partners&utm_source=tyk) + + +### Splunk + +This is a step by step guide to setting Splunk to receive logs from the Tyk Pump. + +The assumptions are that you have Docker installed and Tyk Pro Self-Managed already running. + +#### Steps for Configuration + +1. 
**Run Splunk using Docker** + + Assuming you have Docker installed locally, run the following from a terminal: + + ```{.copyWrapper} + $ docker run \ + -p 8000:8000 \ + -p 8088:8088 \ + -v splunk-data:/opt/splunk/var \ + -v splunk-data:/opt/splunk/etc \ + -e SPLUNK_START_ARGS=--accept-license \ + -e SPLUNK_PASSWORD=mypassword \ + splunk/splunk:latest + ``` + +2. **Setup a collector in Splunk** + + A) Visit http://localhost:8000 and log into the Splunk Dashboard using the username `admin` and the password we set in the Docker run command, `mypassword` + + B) Create a new Data input + Step1 + + C) Select `HTTP Event Collector -> Add New` + Step2 + + D) Set the name to "tyk" and then leave everything else as default + Step2b + + Grab your token at the end page: + Step3 + +3. **Add the Splunk bit to pump.conf** + + Edit your pump's `pump.conf` and add this bit to the "Pumps" section, like so, adding the token from step #1: + + Make sure to add your token from the previous step into the `collector_token` field above + + ```json + { + "pumps": { + "splunk": { + "type": "splunk", + "meta": { + "collector_token": "", + "collector_url": "https://localhost:8088/services/collector/event", + "ssl_insecure_skip_verify": true + } + } + } + } + ``` + + +Make sure that the `localhost` value matches with your setup. Head on over to our [community forum](https://community.tyk.io/) to ask for help if you are stuck here. + + + +4. **Restart Tyk Pump to pickup the Splunk config** + + If you are running Tyk Pump in Docker: + + `$ docker restart tyk-pump` + +5. 
+ **PROFIT!** + + Let's make a few API calls against Tyk, and see if they flow into Splunk + + ```bash + $ curl localhost:8080/loan-service-api/ + + { + "error": "Key not authorized" + }% + ``` + + Success: + Step4 + +### Logzio + +[Logz.io](https://logz.io/) is a cloud-based log management and analytics platform that provides log management built on [Elasticsearch](https://www.elastic.co/), [Logstash](https://www.elastic.co/guide/en/logstash/current/index.html) and [Kibana](https://www.elastic.co/kibana/). + + +#### JSON / Conf file + +Add the following configuration fields to the pumps section within your `pump.conf` file: + +```json +{ + "pumps": + { + "logzio": { + "type": "logzio", + "meta": { + "token": "" + } + } + } +} +``` + +#### Environment variables +```bash +TYK_PMP_PUMPS_LOGZIO_TYPE=logzio +TYK_PMP_PUMPS_LOGZIO_META_TOKEN="{YOUR-LOGZIO-TOKEN}" +``` + +#### Advanced configuration fields +- `meta.url`: Use if you do not want to use the default Logz.io URL, for example when using a proxy. The default url is `https://listener.logz.io:8071`. +- `meta.queue_dir`: The directory for the queue. +- `meta.drain_duration`: This sets the drain duration (when to flush logs on the disk). The default value is `3s`. +- `meta.disk_threshold`: Set the disk queue threshold. Once the threshold is crossed the sender will not enqueue the received logs. The default value is `98` (percentage of disk). +- `meta.check_disk_space`: Set the sender to check if it crosses the maximum allowed disk usage. The default value is `true`. + +## Tyk Analytics Record Fields + +Below is a detailed list of each field contained within our Tyk Analytics Record that is sent from Tyk Pump. + +### Method +Request method. + +**Example:** `GET`, `POST`. + +### Host +Request `Host` header. + +**Remarks:** Includes host and optional port number of the server to which the request was sent. +**Example:** `tyk.io`, or `tyk.io:8080` if port is included. + +### Path +Request path.
+ +**Remarks:** Displayed in decoded form.
+**Example:** `/foo/bar` for `/foo%2Fbar` or `/foo/bar`. + +### RawPath +Request path. + +**Remarks:** Original request path without changes just decoded.
+**Example:** `/foo/bar` for `/foo%2Fbar` or `/foo/bar`. + +### ContentLength +Request `Content-Length` header. + +**Remarks:** The number of bytes in the request body.
+**Example:** `10` for request body `0123456789`. + +### UserAgent +Request `User-Agent` header. + +**Example:** `curl/7.86.0`. + +### Day +Request day. + +**Remarks:** Based on `TimeStamp` field.
+**Example:** `16` for `2022-11-16T03:01:54Z`. + +### Month +Request month. + +**Remarks:** Based on `TimeStamp` field.
+**Example:** `11` for `2022-11-16T03:01:54Z`. + +### Year +Request year. + +**Remarks:** Based on `TimeStamp` field. +**Example:** `2022` for `2022-11-16T03:01:54Z`. + +### Hour +Request hour. + +**Remarks:** Based on `TimeStamp` field.
+**Example:** `3` for `2022-11-16T03:01:54Z`. + +### ResponseCode +Response code. + +**Remarks:** Only contains the integer element of the response code. Can be generated by either the gateway or upstream server, depending on how the request is handled.
+**Example:** `200` for `200 OK`. + +### APIKey +Request authentication key. + +**Remarks:** Authentication key, as provided in request. If no API key is provided then the gateway will substitute a default value.
+**Example:** Unhashed `auth_key`, hashed `6129dc1e8b64c6b4`, or `00000000` if no authentication provided. + +### TimeStamp +Request timestamp. + +**Remarks:** Generated by the gateway, based on the time it receives the request from the client.
+**Example:** `2022-11-16T03:01:54.648+00:00`. + +### APIVersion +Version of API Definition requested. + +**Remarks:** Based on version configuration of context API definition. If API is unversioned then value is "Not Versioned".
+**Example:** Could be an alphanumeric value such as `1` or `b`. Is `Not Versioned` if not versioned. + +### APIName +Name of API Definition requested. + +**Example:** `Foo API`. + +### APIID +Id of API Definition requested. + +**Example:** `727dad853a8a45f64ab981154d1ffdad`. + +### OrgID +Organization Id of API Definition requested. + +**Example:** `5e9d9544a1dcd60001d0ed20`. + +### OauthID +Id of OAuth client. + +**Remarks:** Value is empty string if not using OAuth, or OAuth client not present.
+**Example:** `my-oauth-client-id`. + +### RequestTime +Duration of upstream roundtrip. + +**Remarks:** Equal to value of `Latency.Total` field. +**Example:** `3` for a 3ms roundtrip. + +### RawRequest +Raw HTTP request. + +**Remarks:** Base64 encoded copy of the request sent from the gateway to the upstream server.
+**Example:** `R0VUIC9nZXQgSFRUUC8xLjEKSG9zdDogdHlrLmlv`. + +### RawResponse +Raw HTTP response. + +**Remarks:** Base64 encoded copy of the response sent from the gateway to the client.
+**Example:** `SFRUUC8xLjEgMjAwIE9LCkNvbnRlbnQtTGVuZ3RoOiAxOQpEYXRlOiBXZWQsIDE2IE5vdiAyMDIyIDA2OjIxOjE2IEdNVApTZXJ2ZXI6IGd1bmljb3JuLzE5LjkuMAoKewogICJmb28iOiAiYmFyIgp9Cg==`. + +### IPAddress +Client IP address. + +**Remarks:** Taken from either `X-Real-IP` or `X-Forwarded-For` request headers, if set. Otherwise, determined by gateway based on request.
+**Example:** `172.18.0.1`. + +### Geo +Client geolocation data. + +**Remarks:** Calculated using MaxMind database, based on client IP address.
+**Example:** `{"country":{"isocode":"SG"},"city":{"geonameid":0,"names":{}},"location":{"latitude":0,"longitude":0,"timezone":""}}`. + +### Network +Network statistics. + +**Remarks:** Not currently used. + +### Latency +Latency statistics + +**Remarks:** Contains two fields; `upstream` is the roundtrip duration between the gateway sending the request to the upstream server and it receiving a response. `total` is the `upstream` value plus additional gateway-side functionality such as processing analytics data.
+**Example:** `{"total":3,"upstream":3}`. + + + +We record the round trip time of the call from the gateways reverse proxy. So what you get is the sum of `leaving Tyk -> upstream -> response received back at Tyk`. + + + +### Tags +Session context tags. + +**Remarks:** Can contain many tags which refer to many things, such as the gateway, API key, organization, API definition etc.
+**Example:** `["key-00000000","org-5e9d9544a1dcd60001d0ed20","api-accbdd1b89e84ec97f4f16d4e3197d5c"]`. + +### Alias +Session alias. + +**Remarks:** Alias of the context authenticated identity. Blank if no alias set or request is unauthenticated.
+**Example:** `my-key-alias`. + +### TrackPath +Tracked endpoint flag. + +**Remarks:** Value is `true` if the requested endpoint is configured to be tracked, otherwise `false`.
+**Example:** `true` or `false`. + +### ExpireAt +Future expiry date. + +**Remarks:** Can be used to implement automated data expiry, if supported by storage.
+**Example:** `2022-11-23T07:26:25.762+00:00`. + +## Monitor your APIs with Prometheus + +Your Tyk Pump can expose Prometheus metrics for the requests served by your Tyk Gateway. This is helpful if you want to track how often your APIs are being called and how they are performing. Tyk collects latency data of how long your services take to respond to requests, how often your services are being called and what status code they return. + +We have created a [demo project in GitHub](https://github.com/TykTechnologies/demo-slo-prometheus-grafana) if you want to see this setup in action. + +### Prerequisites + +- A Tyk installation (either Self-Managed or Open Source Gateway) +- Tyk Pump 1.6 or higher + +### Configure Tyk Pump to expose Prometheus metrics + +Prometheus collects metrics from targets by scraping metrics HTTP endpoints. To expose Tyk’s metrics in the Prometheus format, you need to add the following lines to your Tyk Pump configuration file `pump.conf`: + +#### Host + +```.copyWriter +"prometheus": { + "type": "prometheus", + "meta": { + "listen_address": ":9090", + "path": "/metrics", + "custom_metrics":[ + { + "name":"tyk_http_requests_total", + "description":"Total of API requests", + "metric_type":"counter", + "labels":["response_code","api_name","method","api_key","alias","path"] + }, + { + "name":"tyk_http_latency", + "description":"Latency of API requests", + "metric_type":"histogram", + "labels":["type","response_code","api_name","method","api_key","alias","path"] + } + ] + } +} +``` + +Replace `` with your host name or IP address. 
+ +#### Docker + +```.copyWrapper +"prometheus": { + "type": "prometheus", + "meta": { + "listen_address": ":9090", + "path": "/metrics", + "custom_metrics":[ + { + "name":"tyk_http_requests_total", + "description":"Total of API requests", + "metric_type":"counter", + "labels":["response_code","api_name","method","api_key","alias","path"] + }, + { + "name":"tyk_http_latency", + "description":"Latency of API requests", + "metric_type":"histogram", + "labels":["type","response_code","api_name","method","api_key","alias","path"] + } + ] + } +} +``` + +Port 9090 also needs to be exposed by Docker in addition to the port used for health check (here 8083), e.g. with Docker compose: + +```.copyWrapper +tyk-pump: + image: tykio/tyk-pump-docker-pub:${PUMP_VERSION} + ports: + - 8083:8083 + - 9090:9090 +``` + +Restart your Pump to apply the configuration change. + +Verify that the metrics are being exposed by calling the metrics endpoint `http://{hostname}:9090` from your browser. + +### Configure Prometheus to scrape the metrics endpoint + +Prometheus is configured via a [configuration file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) where you can define the metrics endpoint Prometheus will scrape periodically. + +Here's an example configuration scraping Tyk Pump metric endpoints: + +#### Host + +```.copyWrapper +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: tyk + static_configs: + - targets: ['tyk-pump:9090'] +``` +#### Docker + +```.copyWrapper +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: tyk + static_configs: + - targets: ['host.docker.internal:9090'] +``` +1. Then restart your Prometheus instance after any configuration change. +2. In Prometheus under "Status" / "Targets", we can see that Prometheus is able to scrape the metrics successfully: state is UP.
+ +Prometheus status + +### Exploring your metrics in Grafana + +Before trying out, make sure to generate traffic by calling your APIs. You will find a [couple of useful queries](https://github.com/TykTechnologies/tyk-pump#prometheus) in our Tyk Pump GitHub repo based on the metrics exposed by Tyk. These will demonstrate which metric types are exported and how you can customize them. + +You also need to make sure that Grafana is connected to your Prometheus server. This can be configured under [Configuration / Data sources](https://grafana.com/docs/grafana/latest/datasources/add-a-data-source/). + +Grafana Configuration with Prometheus + +### Useful queries + +Here are some useful queries to help you monitor the health of your APIs: + +#### Upstream time across all services + +Tyk collects latency data of how long your upstream services take to respond to requests. This data can be used to configure an alert if the latency goes beyond a certain threshold. This query calculates the 95th percentile of the total request latency of all the upstream services. To run the query: + +``` +histogram_quantile(0.95, sum(rate(tyk_http_latency_bucket[1m])) by (le)) +``` +Upstream Time Query output + +#### Upstream time per API + +This query calculates the 90th percentile of the request latency of upstream services for the selected API. To run this query: + +``` +histogram_quantile(0.90, sum(rate(tyk_http_latency_bucket{api_name="{api_name}"}[1m])) by (le,api_name)) +``` +Replace `{api_name}` with the name of your API for this query. + +#### Request rate + +Track the request rate of your services: + +``` +sum (rate(tyk_http_requests_total[1m])) +``` + +#### Request Rate per API + +Track the request rate of your services for the selected API: + +``` +sum (rate(tyk_http_requests_total{api_name="{api_name}"}[1m])) +``` +Replace `{api_name}` with the name of your API for this query.
+ +#### Error Rates + +Track the error rate your services are serving: + +``` +sum (rate(tyk_http_requests_total{response_code =~"5.."}[1m])) +``` + +#### Error rates per API + +Track the error rate your services are serving for the selected API: + +``` +sum (rate(tyk_http_requests_total{response_code =~"5..", api_name="httpbin - HTTP Request & Response Service"}[1m])) +``` +Replace `` with the name of your API for this query. + +## Setup Prometheus Pump + +We'll show you how to setup Tyk Pump for Prometheus Service Discovery. + +pump-prometheus + +### Integrate with Prometheus using Prometheus Operator + +**Steps for Configuration:** + +1. **Setup Prometheus** + + *Using the prometheus-community/kube-prometheus-stack chart* + + In this example, we use [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), which installs a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). + + ```bash + helm install prometheus-stack prometheus-community/kube-prometheus-stack -n monitoring --create-namespace + ``` + + This is a useful stack where you can get Prometheus, the Prometheus Operator, and Grafana all deployed and configured in one go. + +2. **Install Tyk Pump with PodMonitor** + + If you have Prometheus Operator enabled on the cluster, it would look for β€œPodMonitor” or β€œServiceMonitor” resources and scrap from specified port. The only thing you would need to modify here is the helm release name for Prometheus Operator. + + Also you can customize Prometheus Custom Metrics based on your analytics needs. 
We are using `tyk_http_requests_total` and `tyk_http_latency` described [here](/api-management/tyk-pump#monitor-your-apis-with-prometheus) for illustration: + + ```bash + NAMESPACE=tyk-oss + APISecret=foo + REDIS_BITNAMI_CHART_VERSION=19.0.2 + PromOperator_Release=prometheus-stack + Prometheus_Custom_Metrics='[{"name":"tyk_http_requests_total"\,"description":"Total of API requests"\,"metric_type":"counter"\,"labels":["response_code"\,"api_name"\,"method"\,"api_key"\,"alias"\,"path"]}\, { "name":"tyk_http_latency"\, "description":"Latency of API requests"\, "metric_type":"histogram"\, "labels":["type"\,"response_code"\,"api_name"\,"method"\,"api_key"\,"alias"\,"path"] }]' + + helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --create-namespace --install --version $REDIS_BITNAMI_CHART_VERSION + + helm upgrade tyk-oss tyk-helm/tyk-oss -n $NAMESPACE --create-namespace \ + --install \ + --set global.secrets.APISecret="$APISecret" \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc.cluster.local:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password \ + --set global.components.pump=true \ + --set "tyk-pump.pump.backend={prometheus}" \ + --set tyk-pump.pump.prometheusPump.customMetrics=$Prometheus_Custom_Metrics \ + --set tyk-pump.pump.prometheusPump.prometheusOperator.enabled=true \ + --set tyk-pump.pump.prometheusPump.prometheusOperator.podMonitorSelector.release=$PromOperator_Release + ``` + + + +Please make sure you are installing Redis versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/tyk-self-managed/install#redis). + + + + + +For Custom Metrics, commas are escaped to be used in helm --set command. You can remove the backslashes in front of the commas if you are to set it in values.yaml. We have included an example in the default values.yaml comments section. + + + +3. 
**Verification** + + When successfully configured, you should see the following messages in the pump log: + + ```console + │ time="Jun 26 13:11:01" level=info msg="Starting prometheus listener on::9090" prefix=prometheus-pump │ + │ time="Jun 26 13:11:01" level=info msg="Prometheus Pump Initialized" prefix=prometheus-pump │ + │ time="Jun 26 13:11:01" level=info msg="Init Pump: PROMETHEUS" prefix=main + ``` + + On Prometheus Dashboard, you can see the Pump is listed as one of the targets and Prometheus is successfully scraping it. + + pump-prometheus + + You can check our [Guide on Monitoring API with Prometheus](/api-management/tyk-pump#useful-queries) for a list of useful queries you can set up and use. + + e.g. the custom metric `tyk_http_requests_total` can be retrieved: + + pump-prometheus + + pump-prometheus + +### Integrate with Prometheus using annotations + +**Steps for Configuration:** + +1. **Setup Prometheus** + + *Using the prometheus-community/prometheus chart* + + Alternatively, if you are not using Prometheus Operator, please check how your Prometheus can support service discovery. Let's say you're using the [prometheus-community/prometheus](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#scraping-pod-metrics-via-annotations) chart, which configures Prometheus to scrape from any Pods with the following annotations: + + ```yaml + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "9090" + ``` + + To install Prometheus, run + + ```bash + helm install prometheus prometheus-community/prometheus -n monitoring --create-namespace + ``` + +2.
**Install Tyk Pump with prometheus annotations** + + ```bash + NAMESPACE=tyk-oss + APISecret=foo + REDIS_BITNAMI_CHART_VERSION=19.0.2 + PromOperator_Release=prometheus-stack + Prometheus_Custom_Metrics='[{"name":"tyk_http_requests_total"\,"description":"Total of API requests"\,"metric_type":"counter"\,"labels":["response_code"\,"api_name"\,"method"\,"api_key"\,"alias"\,"path"]}\, { "name":"tyk_http_latency"\, "description":"Latency of API requests"\, "metric_type":"histogram"\, "labels":["type"\,"response_code"\,"api_name"\,"method"\,"api_key"\,"alias"\,"path"] }]' + + helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --create-namespace --install --version $REDIS_BITNAMI_CHART_VERSION + + helm upgrade tyk-oss tyk-helm/tyk-oss -n $NAMESPACE --create-namespace \ + --install \ + --set global.secrets.APISecret="$APISecret" \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc.cluster.local:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password \ + --set global.components.pump=true \ + --set "tyk-pump.pump.backend={prometheus}" \ + --set tyk-pump.pump.prometheusPump.customMetrics=$Prometheus_Custom_Metrics \ + --set-string tyk-pump.pump.podAnnotations."prometheus\.io/scrape"=true \ + --set-string tyk-pump.pump.podAnnotations."prometheus\.io/port"=9090 \ + --set-string tyk-pump.pump.podAnnotations."prometheus\.io/path"=/metrics + ``` + + + +Please make sure you are installing Redis versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/tyk-self-managed/install#redis). + + + +3. **Verification** + + After some time, you can see that Prometheus is successfully scraping from Tyk Pump: + + pump-prometheus + +### Expose a service for Prometheus to scrape + +You can expose Pump as a service so that Prometheus can access the `/metrics` endpoint for scraping. 
Just enable the service in `tyk-pump.pump.service`: + +```yaml + service: + # Tyk Pump svc is disabled by default. Set it to true to enable it. + enabled: true +``` + +## Tyk Pump Capping Analytics Data Storage + +Tyk Gateways can generate a lot of analytics data. A guideline is that for every 3 million requests that your Gateway processes it will generate roughly 1GB of data. + +If you have Tyk Pump set up with the aggregate pump as well as the regular MongoDB pump, then you can make the `tyk_analytics` collection a [capped collection](https://docs.mongodb.com/manual/core/capped-collections/). Capping a collection guarantees that analytics data is rolling within a size limit, acting like a FIFO buffer which means that when it reaches a specific size, instead of continuing to grow, it will replace old records with new ones. + + + +If you are using DocumentDB, capped collections are not supported. See [here](https://docs.aws.amazon.com/documentdb/latest/developerguide/mongo-apis.html) for more details. + + + +The `tyk_analytics` collection contains granular log data, which is why it can grow rapidly. The aggregate pump will convert this data into an aggregate format and store it in a separate collection. The aggregate collection is used for processing reporting requests as it is much more efficient. + +If you've got an existing collection which you want to convert to be capped you can use the `convertToCapped` [MongoDB command](https://docs.mongodb.com/manual/reference/command/convertToCapped/). + +If you wish to configure the pump to cap the collections for you upon creating the collection, you may add the following +configurations to your `uptime_pump_config` and / or `mongo.meta` objects in `pump.conf`. + +``` +"collection_cap_max_size_bytes": 1048577, +"collection_cap_enable": true +``` + +`collection_cap_max_size_bytes` sets the maximum size of the capped collection. +`collection_cap_enable` enables capped collections.
+ +If capped collections are enabled and a max size is not set, a default cap size of `5GiB` is applied. +Existing collections will never be modified. + + + +An alternative to capped collections is MongoDB's **Time To Live** indexing (TTL). TTL indexes are incompatible with capped collections. If you have set a capped collection, a TTL index will not get created, and you will see error messages in the MongoDB logs. See [MongoDB TTL Docs](https://docs.mongodb.com/manual/tutorial/expire-data/) for more details on TTL indexes. + + + + +### Time Based Cap in single tenant environments + +If you wish to reduce or manage the amount of data in your MongoDB, you can add a TTL expiry index to the collection, so older records will be evicted automatically. + + + +Time based caps (TTL indexes) are incompatible with already configured size based caps. + + + + +Run the following command in your preferred MongoDB tool (2592000 in our example is 30 days): + +```{.copyWrapper} +db.tyk_analytics.createIndex( { "timestamp": 1 }, { expireAfterSeconds: 2592000 } ) +``` +This [command](https://docs.mongodb.com/manual/tutorial/expire-data/#expire-documents-at-a-specific-clock-time) sets an expiration rule to evict all records from the collection whose `timestamp` field is older than the specified expiration time. + +### Time Based Cap in multi-tenant environments +When you have multiple organizations, you can control analytics expiration on a per-organization basis. +This technique also uses TTL indexes, as described above, but the index should look like: + +```{.copyWrapper} +db.tyk_analytics.createIndex( { "expireAt": 1 }, { expireAfterSeconds: 0 } ) +``` + +This [command](https://docs.mongodb.com/manual/tutorial/expire-data/#expire-documents-at-a-specific-clock-time) sets the value of `expireAt` to correspond to the time the document should expire. MongoDB will automatically delete documents from the `tyk_analytics` collection 0 seconds after the `expireAt` time in the document.
The `expireAt` will be calculated and created by Tyk in the following step. + +#### Create an Organization Quota + +```{.copyWrapper} +curl --header "x-tyk-authorization: {tyk-gateway-secret}" --header "content-type: application/json" --data @expiry.txt http://{tyk-gateway-ip}:{port}/tyk/org/keys/{org-id} +``` + +Where the content of `expiry.txt` is: + +```{.json} +{ + "org_id": "{your-org-id}", + "data_expires": 86400 +} +``` + +`data_expires` - Sets the time, in seconds, after which the data expires. Tyk will calculate the expiry date for you. + + +### Size Based Cap + +#### Add the Size Cap + + + +The size value should be in bytes, and we recommend using a value just under the amount of RAM on your machine. + + + + + +Run this [command](https://docs.mongodb.com/manual/reference/command/convertToCapped/) in your MongoDB shell: + + +```{.copyWrapper} +use tyk_analytics +db.runCommand({"convertToCapped": "tyk_analytics", size: 100000}); +``` + +#### Adding the Size Cap if using a mongo_selective Pump + +The `mongo_selective` pump stores data on a per-organization basis. You will have to run the following command in your MongoDB shell for an individual organization as follows. + + +```{.copyWrapper} +db.runCommand({"convertToCapped": "z_tyk_analyticz_{org-id}", size: 100000}); +``` + +## Separated Analytics Storage + +For high-traffic systems that make heavy use of analytics, it makes sense to separate out the Redis analytics server from the Redis configuration server that supplies auth tokens and handles rate limiting configuration.
+ +To enable a separate analytics server, update your `tyk.conf` with the following section: + +```{.copyWrapper} +"enable_separate_analytics_store": true, +"analytics_storage": { + "type": "redis", + "host": "", + "port": 0, + "addrs": [ + "localhost:6379" + ], + "username": "", + "password": "", + "database": 0, + "optimisation_max_idle": 3000, + "optimisation_max_active": 5000, + "enable_cluster": false +}, +``` + + + +`addrs` is new in v2.9.3, and replaces `hosts` which is now deprecated. + + + +If you set `enable_cluster` to `false`, you only need to set one entry in `addrs`: + +The configuration is the same (and uses the same underlying driver) as the regular configuration, so Redis Cluster is fully supported. + diff --git a/api-management/upstream-authentication.mdx b/api-management/upstream-authentication.mdx new file mode 100644 index 000000000..6f507d778 --- /dev/null +++ b/api-management/upstream-authentication.mdx @@ -0,0 +1,34 @@ +--- +title: "Upstream Authentication" +description: "Authenticating Tyk Gateway with upstream services" +keywords: "security, upstream authentication, gateway to upstream, OAuth, mTLS, Basic Auth" +sidebarTitle: "Overview" +--- + +## Introduction + +Tyk Gateway sits between your clients and your services, securely routing requests and responses. For each API proxy that you expose on Tyk, you can configure a range of different methods that clients must use to identify (authenticate) themselves to Tyk Gateway. These are described in detail in the [Client Authentication](/api-management/client-authentication) section. + +In the same way as you use Client Authentication to securely confirm the identity of the API clients, your upstream services probably need to securely confirm the identity of their client - namely Tyk. This is where Tyk's flexible **Upstream Authentication** capability comes in. 
+ +When using Tyk, you can choose from a range of authentication methods for each upstream API: +- [Mutual TLS](/api-management/upstream-authentication/mtls) +- [Token-based authentication](/api-management/upstream-authentication/auth-token) +- [Request signing](/api-management/upstream-authentication/request-signing) +- [Basic Authentication](/api-management/upstream-authentication/basic-auth) +- [OAuth 2.0](/api-management/upstream-authentication/oauth) + - [OAuth 2.0 Client Credentials](/api-management/upstream-authentication/oauth#oauth-client-credentials) + - [OAuth 2.0 Password Grant](/api-management/upstream-authentication/oauth#oauth-resource-owner-password-credentials) + + + + + Upstream Basic Authentication and OAuth 2.0 support are only available to licensed users, via the Tyk Dashboard. These features are not available to open source users. + + + + + +Note that OAuth 2.0 Password Grant is prohibited in the [OAuth 2.0 Security Best Practice](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics-13#section-3.4") but is supported by Tyk for use with legacy upstream services. + + diff --git a/api-management/upstream-authentication/auth-token.mdx b/api-management/upstream-authentication/auth-token.mdx new file mode 100644 index 000000000..ef09bcbf8 --- /dev/null +++ b/api-management/upstream-authentication/auth-token.mdx @@ -0,0 +1,24 @@ +--- +title: "Upstream Authentication using Auth Token" +description: "How to authenticate upstream service using auth token" +keywords: "security, upstream authentication, gateway to upstream, auth token" +sidebarTitle: "Auth Token" +--- + +## Token-based authentication + +Token-based authentication (also referred to as Auth Token) is a method whereby the client is identified and authenticated by the server based on a key/token they present as a credential with each request. Typically the token is issued by the server to a specific client. 
+ +The server determines how the key should be provided - typically in a request header, cookie or query parameter. + +Tyk supports [Auth Token](/api-management/authentication/bearer-token) as a method for authenticating **clients** with the **Gateway** - you can use Tyk Gateway or Dashboard to generate access *keys* for an Auth Token protected API as explained in the [documentation](/api-management/policies). The client must then provide the *key* in the appropriate parameter for each request. + +If your **upstream service** is protected using Auth Token then similarly, Tyk will need to provide a token, issued by the upstream, in the request. + +### How to use Upstream Token-based Authentication +Typically Auth Token uses the `Authorization` header to pass the token in the request. + +Tyk's [Request Header Transform](/api-management/traffic-transformation/request-headers) middleware can be configured to add this header to the request prior to it being proxied to the upstream. To enhance security by restricting visibility of the access token, the key/token can be stored in a [key-value store](/tyk-self-managed/install), with only the reference included in the middleware configuration. + + + diff --git a/api-management/upstream-authentication/basic-auth.mdx b/api-management/upstream-authentication/basic-auth.mdx new file mode 100644 index 000000000..2694ee1d7 --- /dev/null +++ b/api-management/upstream-authentication/basic-auth.mdx @@ -0,0 +1,127 @@ +--- +title: "Upstream Authentication using Basic Auth" +description: "How to authenticate upstream service basic authentication" +keywords: "security, upstream authentication, gateway to upstream, basic auth" +sidebarTitle: "Basic Auth" +--- + +## Availability + +| Component | Editions | +| :----------- | :---------- | +| Gateway and Dashboard | Enterprise | + +## Basic Authentication + +Basic Authentication is a standard authentication mechanism implemented by HTTP servers, clients and web browsers. 
This makes it an excellent access control method for smaller APIs. + +An API request made using Basic Authentication will have an `Authorization` header that contains the client's credentials in the form: `Basic `. + +The `` are a base64 encoded concatenation of a client username and password, joined by a single colon `:`. + +Tyk supports Basic Authentication as a method for authenticating **clients** with the **Gateway** - you can use Tyk Gateway or Dashboard to create Basic Auth users, as explained in the [documentation](/api-management/authentication/basic-authentication#registering-basic-authentication-user-credentials-with-tyk). + +If your **upstream service** is protected using Basic Authentication then similarly, Tyk will need to provide user credentials, registered with the upstream, in the request. + +### How to use Upstream Basic Authentication + +If your upstream service requires that Tyk authenticates using Basic Authentication, you will first need to obtain a valid username and password from the server. To enhance security by restricting visibility of the credentials, these can be stored in a [key-value store](/tyk-self-managed/install), with only references included in the API definition. + +If the incoming request from the client already has credentials in the `Authorization` header, then Tyk will replace those with the basic auth credentials before proxying onwards to the upstream. + +Sometimes a non-standard upstream server might require the authentication credentials to be provided in a different header (i.e. not `Authorization`). With Tyk, you can easily configure a custom header to be used for the credentials if required. + +Upstream Basic Authentication is only supported by Tyk OAS APIs. If you are using Tyk Classic APIs, you could create the client credential offline and add the `Authorization` header using the [Request Header Transform](/api-management/traffic-transformation/request-headers) middleware. 
+ +#### Configuring Upstream Basic Auth in the Tyk OAS API definition + +Upstream Authentication is configured per-API in the Tyk extension (`x-tyk-api-gateway`) within the Tyk OAS API definition by adding the `authentication` section within the `upstream` section. + +Set `upstream.authentication.enabled` to `true` to enable upstream authentication. + +For Basic Authentication, you will need to add the `basicAuth` section within `upstream.authentication`. + +This has the following parameters: +- `enabled` set this to `true` to enable upstream basic authentication +- `username` is the username to be used in the request *credentials* +- `password` is the password to be used in the request *credentials* +- `header.enabled` must be set to `true` if your upstream expects the *credentials* to be in a custom header, otherwise it can be omitted to use `Authorization` header +- `header.name` is the custom header to be used if `header.enabled` is set to `true` + +Note that if you use the [Tyk API Designer](#configuring-upstream-basic-auth-using-the-api-designer) in Tyk Dashboard it will always configure the `header` parameter - even if you are using the default `Authorization` value. 
+ +For example: + +```json {hl_lines=["43-54"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-upstream-basic-auth", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://localhost:8181/example-upstream-basic-auth/" + } + ], + "security": [], + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "components": { + "securitySchemes": {} + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-upstream-basic-auth", + "state": { + "active": true + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/example-upstream-basic-auth/" + } + }, + "upstream": { + "url": "https://httpbin.org/basic-auth/myUsername/mySecret", + "authentication": { + "enabled": true, + "basicAuth": { + "password": "mySecret", + "username": "myUsername", + "enabled": true, + "header": { + "enabled": true, + "name": "Authorization" + } + } + } + } + } +} +``` + +In this example upstream authentication has been enabled (line 44). Requests will be proxied to the `GET /basic-auth` endpoint at httpbin.org using the credentials in lines 46 and 47 (username: myUsername, password: mySecret). These credentials will be combined, base64 encoded and then provided in the `Authorization` header, as required by the httpbin.org [documentation](https://httpbin.org/#/Auth/get_basic_auth__user___passwd_"). + +The configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the Upstream Basic Authentication feature. + +#### Configuring Upstream Basic Auth using the API Designer + +Upstream Authentication is configured from the **Settings** tab of the Tyk OAS API Designer, where there is a dedicated section within the **Upstream** section. + +Select **Basic Auth** from the choice in the **Authentication Method** drop-down, then you can provide the client credentials and header name. 
+ +Tyk OAS API Designer showing Upstream Basic Auth configuration options + +
diff --git a/api-management/upstream-authentication/mtls.mdx b/api-management/upstream-authentication/mtls.mdx new file mode 100644 index 000000000..6e1d01930 --- /dev/null +++ b/api-management/upstream-authentication/mtls.mdx @@ -0,0 +1,456 @@ +--- +title: "Upstream Authentication using Mutual TLS" +description: "How to authenticate upstream service using mutual tls" +keywords: "security, upstream authentication, gateway to upstream, mTLS, mutual tls" +sidebarTitle: "Mutual TLS" +--- + +## Mutual TLS (mTLS) + +If your upstream API is protected with [mutual TLS](/basic-config-and-security/security/mutual-tls/client-mtls#why-use-mutual-tls) then Tyk must provide a certificate when connecting to the upstream service and also will need to verify the certificate presented by the upstream. This ensures secure communication between Tyk and your upstream services. + +When Tyk performs an mTLS handshake with an upstream, it needs to know: + +- which client certificate Tyk should use to identify itself +- which public key (certificate) that Tyk should use to verify the identity of the upstream + +We use a system of [mapping certificates](#mapping-certificates-to-domains) to upstreams based on their host domain. This is used for both the [client certificate](#upstream-client-certificates) and, optionally, for the [upstream public key](#upstream-server-certificates) if we want to use specific certificates to protect against compromised certificate authorities (CAs). + +#### Upstream mTLS for Tyk middleware and plugins + +If upstream mTLS certificates are configured for an API, they will not be used for direct proxies to the upstream and will also automatically be used for any HTTP requests made from the [JavaScript Virtual Endpoint](/api-management/traffic-transformation/virtual-endpoints) middleware. They will **not** be used for HTTP requests from custom plugins. 
+ + +#### Upstream mTLS for Tyk Cloud + +All Tyk Cloud users can secure their upstream services with mTLS + +### Mapping certificates to domains + +Tyk maintains mappings of certificates to domains (which can include the port if a non-standard HTTP port is used). Separate maps can be declared globally, to be applied to all APIs, and at the API level for more granular control. The granular API level mapping takes precedence if both are configured. Within each mapping, both default and specific maps can be defined, giving ultimate flexibility. + +When Tyk performs an mTLS handshake with an upstream, it will check if there are certificates mapped to the domain: + +- first it will check in the API definition for a specific certificate +- then it will check in the API definition if there is a default certificate +- then it will check at the Gateway level for a specific certificate +- then it will check at the Gateway level for a default certificate + +Certificates are identified in the mapping using the [Certificate Id](/api-management/certificates#certificate-management) assigned by the Tyk Certificate Store, for example: `{"<domain>": "<certificate-id>"}`. + +When mapping a certificate to a domain: + +- do not include the protocol (e.g. `https://`) +- include the port if a non-standard HTTP port is in use +- you can use the `*` wildcard - either in place of the whole domain or as part of the domain name + +For example, to map a certificate with Id `certId` to an upstream service located at `https://api.production.myservice.com:8443` you could map the certificate as: + +- `{"api.production.myservice.com:8443": "certId"}` +- `{"*.production.myservice.com:8443": "certId"}` +- `{"api.*.myservice.com:8443": "certId"}` + +Note that when using the wildcard (`*`) to replace part of the domain name, it can only represent one fragment so, using our example, you would not achieve the same mapping using `{"*.myservice.com:8443": "certId"}`. 
+ +A *default* certificate to be used for all upstream requests can be mapped by replacing the specific domain with the wildcard, for example `{"*": "certId"}`. + + +### Upstream client certificates + +Tyk can be configured to proxy requests to a single API on to different upstream hosts (for example via load balancing, API versions or URL rewrite middleware). You can configure Tyk to present specific client certificates to specific hosts, and you can specify a default certificate to be used for all upstream hosts. + +The upstream service uses the public key (from the certificate presented by Tyk) to verify the signed data, confirming that Tyk possesses the corresponding private key. + +All certificates are retrieved from the [Tyk Certificate Store](/api-management/certificates#certificate-management) when the proxy occurs. + +#### Mapping client certificates at the Gateway level + +You can map certificates to domains using the [security.certificates.upstream](/tyk-oss-gateway/configuration#securitycertificatesupstream) field in your Gateway configuration file. + +Mapping a certificate to domain `*` will ensure that this certificate will be used in all upstream requests where no other certificate is mapped (at Gateway or API level). + +#### Mapping client certificates at the API level + +You can map certificates to domains using the [upstream.mutualTLS](/api-management/gateway-config-tyk-oas#mutualtls) object (Tyk Classic: `upstream_certificates`) in your API definition. + +Mapping a certificate to domain `*` will ensure that this certificate will be used in all upstream requests where no other certificate is mapped in the API definition. 
+ + +### Upstream server certificates + +Tyk will verify the certificate received from the upstream by performing the following checks: + +- Check that it's issued by a trusted CA +- Check that the certificate hasn't expired +- Verify the certificate's digital signature using the public key from the certificate + + + + + Tyk will look in the system trust store for the server that is running Tyk Gateway (typically `/etc/ssl/certs`). If you are using self-signed certificates, store them here so that Tyk can verify the upstream service. + + + +If you want to restrict the public keys that can be used by the upstream service, then you can use [certificate pinning](/api-management/upstream-authentication/mtls#certificate-pinning) to store a list of certificates that Tyk will use to verify the upstream. + +#### Certificate Pinning + +Tyk provides the facility to allow only certificates generated from specific public keys to be accepted from the upstream services during the mTLS exchange. This is called "certificate pinning" because you *pin* a specific public certificate to an upstream service (domain) and Tyk will only use this to verify connections to that domain. This helps to protect against compromised certificate authorities. You can pin one or more public keys per domain. + +The public keys must be stored in PEM format in the [Tyk Certificate Store](/api-management/certificates#certificate-management). + +##### Configuring Certificate Pinning at the Gateway level + +If you want to lock down the public certificates that can be used in mTLS handshakes for specific upstream domains across all APIs, you can pin public certificates to domains using the [security.pinned_public_keys](/tyk-oss-gateway/configuration#securitypinned_public_keys) field in your Gateway configuration file. + +This accepts a map of domain addresses to certificates in the same way as for the client certificates. Wildcards are supported in the domain addresses. 
Pinning one or more certificates to domain `*` will ensure that only these certificates will be used to verify the upstream service during the mTLS handshake. + +##### Configuring Certificate Pinning at the API level + +Restricting the certificates that can be used by the upstream for specific APIs is simply a matter of registering a map of domain addresses to certificates in the [upstream.certificatePinning](/api-management/gateway-config-tyk-oas#certificatepinning) object in the API definition (Tyk Classic: `pinned_public_keys`). + + +### Overriding mTLS for non-production environments + +When you are developing or testing an API, your upstream might not have the correct certificates that are deployed for your production service. This could cause problems when integrating with Tyk. + +You can use the [proxy.transport.insecureSkipVerify](/api-management/gateway-config-tyk-oas#tlstransport) option in the API definition (Tyk Classic: `proxy.transport.ssl_insecure_skip_verify`) to instruct Tyk to ignore the certificate verification stage for a specific API. + +If you want to ignore upstream certificate verification for all APIs deployed on Tyk, you can use the [proxy_ssl_insecure_skip_verify](/tyk-oss-gateway/configuration#proxy_ssl_insecure_skip_verify) option in the Tyk Gateway configuration. + +These are labelled *insecure* with good reason and should never be configured in production. + + +### Using Tyk Dashboard to configure upstream mTLS + +Using the Tyk Dashboard, you can enable upstream mTLS from the **Upstream** section in the API Designer: + +Enable upstream mTLS + +Click on **Attach Certificate** to open the certificate attachment window: + +Attach a certificate to an API + +This is where you can define the upstream **Domain Name** and either select an existing certificate from the Tyk Certificate Store, or upload a new certificate to the store. 
+ +If you want to [pin the public certificates](/api-management/upstream-authentication/mtls#certificate-pinning) that can be used by Tyk when verifying the upstream service, then you should enable **Public certificates** and attach certificates in the same manner as for the client certificates: + +Enable public key pinning + +For details on managing certificates with Tyk, please see the [certificate management](/api-management/certificates#certificate-management) documentation. + +For Tyk Classic APIs, the **Upstream Certificates** controls are on the **Advanced Options** tab of the Tyk Classic API Designer. + + +### Using Tyk Operator to configure mTLS + + + + +Configure upstream mTLS client certificates using the `mutualTLS` field in the `TykOasApiDefinition` object when using Tyk Operator, for example: + +```yaml{hl_lines=["12-18"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 + kind: TykOasApiDefinition + metadata: + name: petstore + namespace: default + spec: + tykOAS: + configmapRef: + name: petstore + namespace: default + keyName: petstore.json + mutualTLS: + enabled: true + domainToCertificateMapping: + - domain: "petstore.com" + certificateRef: petstore-domain + - domain: "petstore.co.uk" + certificateRef: petstore-uk-domain +``` + + + + +Tyk Operator supports certificate pinning in the Tyk OAS custom resource, allowing you to secure your API by pinning a public key stored in a secret to a specific domain. 
+ +Example of public keys pinning + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm + namespace: default +data: + test_oas.json: |- + { + "info": { + "title": "httpbin with certificate pinning", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "httpbin with certificate pinning", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://httpbin.org/" + }, + "server": { + "listenPath": { + "value": "/httpbin/", + "strip": true + } + } + } + } +--- +apiVersion: v1 +kind: Secret +metadata: + name: domain-secret +type: kubernetes.io/tls # The secret needs to be a type of kubernetes.io/tls +data: + tls.crt: + tls.key: "" +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: "oas-pinned-public-keys" +spec: + tykOAS: + configmapRef: + keyName: test_oas.json + name: cm + certificatePinning: + enabled: true + domainToPublicKeysMapping: + - domain: "httpbin.org" + publicKeyRefs: + - domain-secret +``` + +This example demonstrates how to enable certificate pinning for the domain `httpbin.org` using a public key stored in a Kubernetes secret (`domain-secret`). + + + + +### Using Tyk Operator to configure mTLS for Tyk Classic APIs + + + +When using Tyk Classic APIs with Tyk Operator, you can configure upstream client certificates for mTLS using one of the following fields within the ApiDefinition object: + +- **upstream_certificate_refs**: Configure using certificates stored within Kubernetes secret objects. +- **upstream_certificates**: Configure using certificates stored within Tyk Dashboard's certificate store. + +**upstream_certificate_refs** + +The `upstream_certificate_refs` field can be used to configure certificates for different domains. References can be held to multiple secrets which are used for the domain mentioned in the key. 
Currently "*" is used as a wildcard for all the domains + +The example listed below shows that the certificate in the secret, *my-test-tls*, is used for all domains. + +```yaml +# First apply this manifest using the command +# "kubectl apply -f config/samples/httpbin_upstream_cert.yaml" +# +# The operator will try to create the ApiDefinition and will succeed but will log an error that a certificate is missing +# in the cluster for an upstream +# +# Generate your public-private key pair , for test you can use the following command to obtain one fast: +# "openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out tls.crt -keyout tls.key" +# +# Run the following command to obtain the values that must be put inside the yaml that contians the secret resource: +# "kubectl create secret tls my-test-tls --key="tls.key" --cert="tls.crt" -n default -o yaml --dry-run=client" +# +# Apply your TLS certificate using the following command: (we already have an example one in our repo) +# "kubectl apply -f config/sample/simple_tls_secret.yaml" +# +# NOTE: the upstream_certificate_refs can hold references to multiple secrets which are used for the domain +# mentioned in the key (currently "*" is used as a wildcard for all the domains) +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + upstream_certificate_refs: + "*": my-test-tls + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default +``` + +A secret can be created and output in yaml format using the following command: + +```bash +kubectl create secret tls my-test-tls --key="keyfile.key" --cert="certfile.crt" -n default -o yaml --dry-run=client +kubectl apply -f path/to/your/tls_secret.yaml +``` + +**upstream_certificates** + +The `upstream_certificates` field allows 
certificates uploaded to the certificate store in Tyk Dashboard to be referenced in the Api Definition: + +```yaml +# Skip the concatenation and .pem file creation if you already have a certificate in the correct format + +# First generate your public-private key pair , for test use you can use the following command to obtain one fast: +# "openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out tls.crt -keyout tls.key" + +# Concatenate the above files to obtain a .pem file which we will upload using the dashboard UI +# "cat tls.crt tls.key > cert.pem" + +# Upload it to the tyk certificate store using the dashboard + +# Fill in the manifest with the certificate id (the long hash) that you see is given to it in the dashboard +# (in place of "INSERT UPLOADED CERTIFICATE NAME FROM DASHBOARD HERE") +# Optional: Change the domain from "*" to something more specific if you need to use different +# upstream certificates for different domains + +# Then apply this manifest using the command +# "kubectl apply -f config/samples/httpbin_upstream_cert_manual.yaml" + +# The operator will try create the ApiDefinition and will succeed and it will have the requested domain upstream certificate +# in the cluster for an upstream + +# NOTE: the upstream_certificate can hold multiple domain-certificateName pairs +# (currently "*" is used as a wildcard for all the domains) + +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + upstream_certificates: + "*": #INSERT UPLOADED CERTIFICATE NAME FROM DASHBOARD HERE# + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default +``` + + + + +When using Tyk Classic APIs with Tyk Operator you can configure certificate pinning using one of the following fields within the ApiDefinition object: + +- 
**pinned_public_keys**: Use public keys uploaded via the Certificate API. +- **pinned_public_keys_refs**: Uses public keys configured from Kubernetes secret objects. + +###### pinned_public_keys + +Use the `pinned_public_keys` mapping to pin public keys to specific domains, referencing public keys that have been uploaded to Tyk Certificate storage via the Certificate API. + +```yaml +pinned_public_keys: + "foo.com": "", + "*": "," +``` + +Each `key-id` value should be set to the ID returned from uploading the public key via the Certificate API. Multiple public keys can be specified by separating their IDs by a comma. + +
+ +###### pinned_public_keys_refs + +The `pinned_public_keys_refs` mapping should be used to configure pinning of public keys sourced from Kubernetes secret objects for different domains. + +Each key should be set to the name of the domain and the value should refer to the name of a Kubernetes secret object that holds the corresponding public key for that domain. + +Wildcard domains are supported and "*" can be used to denote all domains. + + + +**Caveats** + +- Only *kubernetes.io/tls* secret objects are allowed. +- Please use the *tls.crt* field for the public key. +- The secret that includes a public key must be in the same namespace as the ApiDefinition object. + + + +The example below illustrates a scenario where the public key from the Kubernetes secret object, *httpbin-secret*, is used for all domains, denoted by the wildcard character '*'. In this example the *tls.crt* field of the secret is set to the actual public key of *httpbin.org*. Subsequently, if any URL other than https://httpbin.org is targeted (e.g. https://github.com/) a *public key pinning error* will be raised for that particular domain. This is because the public key of *httpbin.org* has been configured for all domains. + +```yaml +# ApiDefinition object 'pinned_public_keys_refs' field uses the following format: +# spec: +#   pinned_public_keys_refs: +#     "domain.org": # the name of the Kubernetes Secret Object that holds the public key for the 'domain.org'. +# +# In this way, you can refer to Kubernetes Secret Objects through 'pinned_public_keys_refs' field. +# +# In this example, we have an HTTPS upstream target as `https://httpbin.org`. The public key of httpbin.org is obtained +# with the following command: +# $ openssl s_client -connect httpbin.org:443 -servername httpbin.org 2>/dev/null | openssl x509 -pubkey -noout +# +# Note: Please set tls.crt field of your secret to actual public key of httpbin.org. +# +# We are creating a secret called 'httpbin-secret'. 
In the 'tls.crt' field of the secret, we are specifying the public key of the +# httpbin.org obtained through above `openssl` command, in the decoded manner. +# +apiVersion: v1 +kind: Secret +metadata: + name: httpbin-secret +type: kubernetes.io/tls +data: + tls.crt: # Use tls.crt field for the public key. + tls.key: "" +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-certificate-pinning +spec: + name: httpbin - Certificate Pinning + use_keyless: true + protocol: http + active: true + pinned_public_keys_refs: + "*": httpbin-secret + proxy: + target_url: https://httpbin.org + listen_path: /pinning + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default +``` +
+
+ +
+ diff --git a/api-management/upstream-authentication/oauth.mdx b/api-management/upstream-authentication/oauth.mdx new file mode 100644 index 000000000..fba64200f --- /dev/null +++ b/api-management/upstream-authentication/oauth.mdx @@ -0,0 +1,269 @@ +--- +title: "Upstream Authentication using OAuth" +description: "How to authenticate upstream service using oauth" +keywords: "security, upstream authentication, gateway to upstream, oauth" +sidebarTitle: "OAuth 2.0" +--- + +## Availability + +| Component | Editions | +| :----------- | :---------- | +| Gateway and Dashboard | Enterprise | + +## Upstream OAuth 2.0 + +OAuth 2.0 is an open standard authorization protocol that allows services to provide delegated and regulated access to their APIs; critically the user credentials are not shared with the upstream service, instead the client authenticates with a separate Authentication Server which issues a time-limited token that the client can then present to the upstream (Resource Server). The upstream service validates the token against the Authentication Server before granting access to the client. + +The Authentication Server (auth server) has the concept of an OAuth Client - this is equivalent to the client's account on the auth server. There are different ways that a client can authenticate with the auth server, each with their own advantages and disadvantages for different use cases. + +The auth server is often managed by a trusted third party Identity Provider (IdP) such as Okta or Auth0. + +Tyk supports OAuth 2.0 as a method for authenticating **clients** with the **Gateway** - you can use Tyk's own auth server functionality via the [Tyk OAuth 2.0](/api-management/authentication/oauth-2) auth method or obtain the access token via a third party auth server and use the [JWT Auth](/basic-config-and-security/security/authentication-authorization/json-web-tokens) method. 
+ +If your **upstream service** is protected using OAuth 2.0 then similarly, Tyk will need to obtain a valid access token to provide in the request to the upstream. + +Tyk supports two different OAuth grant types for connecting to upstream services: +- [Client credentials](#oauth-client-credentials) +- [Resource owner password credentials](#oauth-resource-owner-password-credentials) + +#### OAuth client credentials + +The client credentials grant relies upon the client providing an id and secret (the *client credentials*) to the auth server. These are checked against the list of OAuth Clients that it holds and, if there is a match, it will issue an access token that instructs the Resource Server which resources that client is authorized to access. For details on configuring Tyk to use Upstream Client Credentials see [below](#configuring-upstream-oauth-20-client-credentials-in-the-tyk-oas-api-definition). + +#### OAuth resource owner password credentials + +The resource owner password credentials grant (also known simply as **Password Grant**) is a flow where the client must provide both their own credentials (client Id and secret) and a username and password identifying the resource owner to the auth server to obtain an access token. Thus the (upstream) resource owner must share credentials directly with the client. This method is considered unsafe and is prohibited in the [OAuth 2.0 Security Best Practice](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics-13#section-3.4) but is supported by Tyk for use with legacy upstream services. For details on configuring Tyk to use Upstream Password Grant see [below](#configuring-upstream-oauth-20-password-grant-in-the-tyk-oas-api-definition). + +### How to use Upstream OAuth 2.0 for Authentication + +If your upstream service requires that Tyk authenticates via an OAuth auth server, you will first need to obtain credentials for the OAuth Client created in the auth server. 
You select which grant type to use and provide the required credentials in the API definition. + +To enhance security by restricting visibility of the credentials, these can be stored in a [key-value store](/tyk-self-managed/install), with only references included in the API definition. + +Some auth servers will return *additional metadata* with the access token (for example, the URL of the upstream server that should be addressed using the token if this can vary per client). Tyk can accommodate this using the optional `extraMetadata` field in the API definition. The response from the auth server will be parsed for any fields defined in `extraMetadata`; any matches will be saved to the request context where they can be accessed from other middleware (for our example, the [URL rewrite](/transform-traffic/url-rewriting#url-rewrite-middleware) middleware could be used to modify the upstream target URL). + +#### Configuring Upstream OAuth 2.0 Client Credentials in the Tyk OAS API definition + +Upstream Authentication is configured per-API in the Tyk extension (`x-tyk-api-gateway`) within the Tyk OAS API definition by adding the `authentication` section within the `upstream` section. + +Set `upstream.authentication.enabled` to `true` to enable upstream authentication. + +For OAuth 2.0 Client Credentials, you will need to add the `oauth` section within `upstream.authentication`. 
+ +This has the following parameters: +- `enabled` set this to `true` to enable upstream OAuth authentication +- `allowedAuthorizeTypes` should include the value `clientCredentials` +- `clientCredentials` should be configured with: + - `tokenUrl` is the URL of the `/token` endpoint on the *auth server* + - `clientId` is the client ID to be provided to the *auth server* + - `clientSecret` is the client secret to be provided to the *auth server* + - `scopes` is an optional array of authorization scopes to be requested + - `extraMetadata` is an optional array of additional fields to be extracted from the *auth server* response + - `header.enabled` must be set to `true` if your upstream expects the credentials to be in a custom header, otherwise it can be omitted to use `Authorization` header + - `header.name` is the custom header to be used if `header.enabled` is set to `true` + +Note that if you use the [Tyk API Designer](/api-management/upstream-authentication/basic-auth#configuring-upstream-basic-auth-using-the-api-designer) in Tyk Dashboard it will always configure the `header` parameter - even if you are using the default `Authorization` value. 
+ +For example: + +```json {hl_lines=["43-62"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-upstream-client-credentials", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://localhost:8181/example-upstream-client-credentials/" + } + ], + "security": [], + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "components": { + "securitySchemes": {} + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-upstream-client-credentials", + "state": { + "active": true + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/example-upstream-client-credentials/" + } + }, + "upstream": { + "url": "https://httpbin.org/", + "authentication": { + "enabled": true, + "oauth": { + "enabled": true, + "allowedAuthorizeTypes": [ + "clientCredentials" + ], + "clientCredentials": { + "header": { + "enabled": true, + "name": "Authorization" + }, + "tokenUrl": "http:///token", + "clientId": "client123", + "clientSecret": "secret123", + "scopes": ["scope1"], + "extraMetadata": ["instance_url"] + } + } + } + } + } +} +``` + +In this example upstream authentication has been enabled (line 44). The authentication method to be used is indicated in lines 46 (OAuth) and 48 (client credentials). When a request is made to the API, Tyk will request an access token from the *authorization server* at `http://` providing client credentials and the scope `scope1`. + +Tyk will parse the response from the *authorization server* for the key `instance_url`, storing any value found in the *request context* were it can be accessed by other middleware as `$tyk_context.instance_url` (note the rules on accessing [request context variables from middleware](/api-management/traffic-transformation/request-context-variables)). 
+ +On receipt of an access token from the *authorization server*, Tyk will proxy the original request to the upstream server (`https://httpbin.org/`) passing the access token in the `Authorization` header. + +If you replace the `upstream.url` and *authorization server* details with valid details, then the configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the Upstream OAuth 2.0 Client Credentials feature. + +#### Configuring Upstream OAuth 2.0 Client Credentials using the API Designer + +Upstream Authentication is configured from the **Settings** tab of the Tyk OAS API Designer, where there is a dedicated section within the **Upstream** section. + +Select **OAuth** from the choice in the **Authentication Method** drop-down, then you can provide the header name, authorization server token URL and select **Client Credentials** to reveal the configuration for the credentials to be passed to the auth server. + +Tyk OAS API Designer showing Upstream OAuth client credentials configuration options + +#### Configuring Upstream OAuth 2.0 Password Grant in the Tyk OAS API definition + +Upstream Authentication is configured per-API in the Tyk extension (`x-tyk-api-gateway`) within the Tyk OAS API definition by adding the `authentication` section within the `upstream` section. + +Set `upstream.authentication.enabled` to `true` to enable upstream authentication. + +For OAuth 2.0 Resource Owner Password Credentials (*Password Grant*), you will need to add the `oauth` section within `upstream.authentication`. 
+ +This has the following parameters: +- `enabled` set this to `true` to enable upstream OAuth authentication +- `allowedAuthorizeTypes` should include the value `password` +- `password` should be configured with: + - `tokenUrl` is the URL of the `/token` endpoint on the *auth server* + - `clientId` is the client ID to be provided to the *auth server* + - `clientSecret` is the client secret to be provided to the *auth server* + - `username` is the Resource Owner username to be provided to the *auth server* + - `password` is the Resource Owner password to be provided to the *auth server* + - `scopes` is an optional array of authorization scopes to be requested + - `extraMetadata` is an optional array of additional fields to be extracted from the *auth server* response + - `header.enabled` must be set to `true` if your upstream expects the credentials to be in a custom header, otherwise it can be omitted to use `Authorization` header + - `header.name` is the custom header to be used if `header.enabled` is set to `true` + +Note that if you use the [Tyk API Designer](/api-management/upstream-authentication/basic-auth#configuring-upstream-basic-auth-using-the-api-designer) in Tyk Dashboard it will always configure the `header` parameter - even if you are using the default `Authorization` value. 
+ +For example: + +```json {hl_lines=["43-64"],linenos=true, linenostart=1} +{ + "info": { + "title": "example-upstream-password-grant", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://localhost:8181/example-upstream-password-grant/" + } + ], + "security": [], + "paths": { + "/anything": { + "get": { + "operationId": "anythingget", + "responses": { + "200": { + "description": "" + } + } + } + } + }, + "components": { + "securitySchemes": {} + }, + "x-tyk-api-gateway": { + "info": { + "name": "example-upstream-password-grant", + "state": { + "active": true + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/example-upstream-password-grant/" + } + }, + "upstream": { + "url": "https://httpbin.org/", + "authentication": { + "enabled": true, + "oauth": { + "enabled": true, + "allowedAuthorizeTypes": [ + "password" + ], + "password": { + "header": { + "enabled": true, + "name": "Authorization" + }, + "tokenUrl": "http:///token", + "clientId": "client123", + "clientSecret": "secret123", + "username": "user123", + "password": "pass123", + "scopes": ["scope1"], + "extraMetadata": ["instance_url"] + } + } + } + } + } +} +``` + +In this example upstream authentication has been enabled (line 44). The authentication method to be used is indicated in lines 46 (OAuth) and 48 (password grant). When a request is made to the API, Tyk will request an access token from the *authorization server* at `http://` providing client credentials, resource owner credentials and the scope `scope1`. + +Tyk will parse the response from the *authorization server* for the key `instance_url`, storing any value found in the *request context* were it can be accessed by other middleware as `$tyk_context.instance_url` (note the rules on accessing [request context variables from middleware](/api-management/traffic-transformation/request-context-variables)). 
+ +On receipt of an access token from the *authorization server*, Tyk will proxy the original request to the upstream server (`https://httpbin.org/`) passing the access token in the `Authorization` header. + +If you replace the `upstream.url` and *authorization server* details with valid details, then the configuration above is a complete and valid Tyk OAS API Definition that you can import into Tyk to try out the Upstream OAuth 2.0 Password Grant feature. + +#### Configuring Upstream OAuth 2.0 Password Grant using the API Designer + +Upstream Authentication is configured from the **Settings** tab of the Tyk OAS API Designer, where there is a dedicated section within the **Upstream** section. + +Select **OAuth** from the choice in the **Authentication Method** drop-down, then you can provide the header name, authorization server token URL and select **Resource Owner Password Credentials** to reveal the configuration for the credentials to be passed to the auth server. + +Tyk OAS API Designer showing Upstream OAuth password grant configuration options + + + +Any error encountered in the communication with the OAuth server will generate an `UpstreamOAuthError` event. This event can be used to trigger an event handler, for example you could use a [webhook](/api-management/gateway-events) to alert the system administrator of the issue. 
+ diff --git a/api-management/upstream-authentication/request-signing.mdx b/api-management/upstream-authentication/request-signing.mdx new file mode 100644 index 000000000..3a921c342 --- /dev/null +++ b/api-management/upstream-authentication/request-signing.mdx @@ -0,0 +1,133 @@ +--- +title: "Upstream Authentication using Request Signing" +description: "How to authenticate upstream service using request signing" +keywords: "security, upstream authentication, gateway to upstream, request signing" +sidebarTitle: "Request Signing" +--- + +## Request signing + +Request Signing is an access token method that adds another level of security where the client generates a unique signature that identifies the request temporally to ensure that the request is from the requesting user, using a secret key that is never broadcast over the wire. + +Tyk can apply either the symmetric Hash-Based Message Authentication Code (HMAC) or asymmetric Rivest-Shamir-Adleman (RSA) algorithms when generating the signature for a request to be sent upstream. For HMAC, Tyk supports different options for the hash length. + +The following algorithms are supported: + +| Hashing algorithm | Tyk identifier used in API definition | +| :------------------- | :--------------------------------------- | +| HMAC SHA1 | `hmac-sha1` | +| HMAC SHA256 | `hmac-sha256` | +| HMAC SHA384 | `hmac-sha384` | +| HMAC SHA512 | `hmac-sha512` | +| RSA SHA256 | `rsa-sha256` | + +This feature is implemented using [Draft 10](https://tools.ietf.org/html/draft-cavage-http-signatures-10) RFC. The signatures generated according to this standard are temporal - that is, they include a time stamp. If there is no `Date` header in the request that is to be proxied to the upstream, Tyk will add one. + +### Configuring Request Signing in the API definition +Upstream Authentication is configured per-API in the Tyk Vendor Extension by adding the authentication section within the `upstream` section. 
+ +For Request Signing, you must configure [upstream.authentication.upstreamRequestSigning](/api-management/gateway-config-tyk-oas#upstreamrequestsigning), providing the following settings: + +- the `signatureHeader` in which the signature should be sent (typically `Authorization`) +- the `algorithm` to be used to generate the signature (from the table above) +- the `secret` to be used in the encryption operation +- optional `headers` that Tyk should include in the string that is encrypted to generate the signature +- the `keyId` that the upstream will use to identify Tyk as the client (used for HMAC encryption) +- the `certificateId` that the upstream will use to identify Tyk as the client (used for RSA encryption) + +The Tyk Classic equivalent is [request_signing](/api-management/gateway-config-tyk-classic#upstream-authentication). + +### Configuring Request Signing with Tyk Operator + +When using Tyk Operator, the `certificateId` and `secret` are encapsulated in Kubernetes references: +- `certificateRef`: references a Secret containing the private and secret key. +- `secretRef`: references a Kubernetes Secret that holds the secret used in the encryption operation. 
+ +For example: + +```yaml{linenos=true, linenostart=1, hl_lines=["66-73"]} + apiVersion: v1 + data: + secretKey: cGFzc3dvcmQxMjM= + kind: Secret + metadata: + name: upstream-secret + namespace: default + type: Opaque + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: booking + namespace: default + data: + test_oas.json: |- + { + "info": { + "title": "bin", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "bin", + "state": { + "active": true, + "internal": false + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/bin/" + } + }, + "upstream": { + "url": "http://httpbin.org/", + "authentication": { + "requestSigning": { + "enabled": true, + "signatureHeader": "Signature", + "algorithm": "hmac-sha256", + "keyId": "random-key-id", + "headers": [], + "secret": "" + } + } + } + } + } + --- + apiVersion: tyk.tyk.io/v1alpha1 + kind: TykOasApiDefinition + metadata: + name: booking + namespace: default + spec: + tykOAS: + configmapRef: + namespace: default + name: booking + keyName: test_oas.json + upstreamRequestSigning: + certificateRef: "" + secretRef: + namespace: default + name: upstream-secret + secretKey: secretKey + algorithm: "hmac-sha256" + keyId: "" + ``` +In this example, a Tyk OAS API was created using the `upstreamRequestSigning` field. It can be broken down as follows: +- `upstreamRequestSigning`: This defines the settings for Upstream Request Signing. in the example manifest, it configures Upstream Request Signing using the `booking` API. + - `certificateRef`: References a Secret containing the private and secret key for signing client API requests. This should be used if `secretRef` is not specified. + - `secretRef`: References a Kubernetes Secret that holds the secret key for signing client requests. + - `algorithm`: Specifies the algorithm used for signing. 
+ - For `secretRef`, supported algorithms include: `hmac-sha1`, `hmac-sha256`, `hmac-sha384`, and `hmac-sha512`. + - For `certificateRef`, the required algorithm is `rsa-sha256`. + - `keyId`: A user-defined key assumed to be available on the upstream service. This is used in the `SignatureHeader` and should be included when using `certificateRef`. It is required when using the RSA algorithm. + +
+ + diff --git a/api-management/user-management.mdx b/api-management/user-management.mdx new file mode 100644 index 000000000..6542bbf9a --- /dev/null +++ b/api-management/user-management.mdx @@ -0,0 +1,485 @@ +--- +title: "User management with Tyk Dashboard" +description: "How to manage users, teams, permissions, rbac in Tyk Dashboard" +keywords: "Dashboard, User Management, RBAC, Role Based Access Control, User Groups, Teams, Permissions, API Ownership, SSO, Single Sign On, Multi Tenancy" +sidebarTitle: "User Management" +--- + +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; + +## Introduction + +Tyk Dashboard provides you with the ability to manage Users, Teams and Permissions enabling organizations to maintain robust control over access and visibility. These capabilities empower teams to manage large-scale API portfolios, mitigate risks of unauthorized access, and reduce operational complexity. + +In this section, we delve into the following key topics: + +1. **[Managing Users](#manage-tyk-dashboard-users)**: + Streamlining user administration by creating, updating, and deactivating accounts within the Tyk Dashboard using both the UI and API. +2. **[Managing User Permissions](#user-permissions)**: + Configuring and enforcing role-based access control for users within the Tyk Dashboard, using both the API and UI. +3. **[Managing User Groups/Teams](#manage-tyk-dashboard-user-groups)**: + Organizing users into groups or teams to simplify role assignment, permissions management, and collaborative workflows within the Tyk Dashboard. +4. **[Managing Passwords and Policy](#manage-user-passwords)**: + Establishing and enforcing password policies, including complexity and expiration settings, to secure user accounts. +5. **[Configuring API Ownership](#api-ownership)**: + Applying role-based access control to APIs to govern visibility and manageability for specific teams or users. +6. 
**[Managing Users across Multiple Tyk Organizations](#manage-tyk-dashboard-users-in-multiple-organizations)**: + Administering user access and roles across multiple organizations, ensuring consistent and secure management in multi-tenant setups. +7. **[Single Sign-On](#single-sign-on-integration)**: + Integrating and configuring Single Sign-On (SSO) solutions to streamline authentication and enhance security across the Tyk Dashboard. + +
+ + +The availability of some features described in this section depends on your license. +
+For further information, please check our [price comparison](https://tyk.io/price-comparison/) or consult our sales and expert engineers: + +
+ + + +## Understanding "User" in Tyk + +In the context of Tyk, a User refers to an individual responsible for managing, configuring, and maintaining the Tyk API Gateway and its related components. These users interact with the Tyk Dashboard and API to control various aspects such as API management, user permissions, security policies, and organizational settings. This term does not refer to end-users or consumers of the APIs managed through Tyk but specifically to administrators and developers operating the Tyk ecosystem. + +## Initial Admin User Creation + +When you start the Tyk Dashboard for the first time, the bootstrap process creates an initial "user" for you with admin permissions, which allows them access to control and configure everything in the Dashboard (via the UI or Tyk Dashboard API). + +## Manage Tyk Dashboard Users + +Dashboard users have twofold access to the dashboard: they can access both the Dashboard API and the dashboard itself. It is possible to generate users that have read-only access to certain sections of the dashboard and the underlying API. + +Dashboard users are not the same as developer portal users (a.k.a. [developers](/tyk-developer-portal/tyk-portal-classic/portal-concepts#developers)). The credentials are stored independently and have different mechanics relating to registration, management and access. For example, it is not possible to log into the developer portal using a dashboard account. + +### Using Dashboard UI + +To create a dashboard user from the GUI: + +1. **Select "Users" from the "System Management" section** + + Users menu + +2. **Click "ADD USER"** + + Add user button location + +3. **Add the user's basic details** + + User form + + In this section: + + * **First Name**: The user's first name. + * **Last Name**: The user's last name. + * **Email**: The email address of the user; this will also be their login username. 
+ * **Password**: The password to assign to the user, this will automatically be hashed and salted before storing in the database. **NOTE** you need to inform the user about the password you have created for them. + * **Active**: Must be true for the user to have access to the dashboard or the dashboard API. + +4. **Set the user permissions** + + Admin checkbox location + + You can be very specific with regards to which pages and segments of the Dashboard the user has access to. Some Dashboard pages require access to multiple parts of the API, and so you may get errors if certain related elements are disabled (e.g. APIs + Policies) + + Permissions are set and enforced when they are set on this page. They can either be **read** or **write**. If set to **deny** then the record is non-existent in the object (there is no explicit "deny"). This means that if you set **deny** on all options it looks as if they have not been written, but they will still be enforced so long as even one read or write option has been set. + +5. **Click "Save"** + + Save button location + + The user will automatically be created, as will their API Access token, which you will be able to retrieve by opening the user listing page again and selecting the user's username. + +### Using Dashboard API + +To authenticate requests to the Tyk Dashboard API, you will need to provide an API Key in the `Authorization` header. 
+ +This is your **Tyk Dashboard API Access Credentials**, which can be found on the user detail page: + +API key and RPC key locations + +You can [create a user](/api-management/dashboard-configuration#add-user) with a call to the `POST /api/users` endpoint, for example: + +``` bash +curl -H "Authorization: {YOUR-TYK-DASHBOARD-API-ACCESS-CREDENTIALS}" \ + -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d '{ + "first_name": "Test", + "last_name": "User", + "email_address": "test@testing.com", + "active": true, + "user_permissions": { + "IsAdmin": "admin" + }, + "password": "thisisatest" + }' http://{your-dashboard-host}:{port}/api/users | python -mjson.tool +``` + +In this example, we have given the user Admin privileges. To see a detailed breakdown of permission objects, please see below. + +You will see the following response to confirm that the user has been created: + +```json +{ + "Message": "User created", + "Meta": null, + "Status": "OK" +} +``` + +The user is now active. + +## Manage User Passwords +You can change your password in these circumstances: + +* If you have forgotten your password +* If you wish to change your password + +### Forgotten Your Password? +If you have forgotten your password, you can request a password reset email from the **Dashboard Login** screen: + +password reset email + +Enter your login email address, and you will receive an email with a link that enables you to create a new password. + + + +This link will only be valid for 1000 seconds +
+You will need to configure your [outbound email settings](/configure/outbound-email-configuration) to enable this feature. +
+ + +### Change Your Password +If you wish to change your current password, from the **System Management > Users** screen, select **Edit** for your Username. + + + +You will not be able to change the password for other Dashboard users. + + + +From your user details, click **Reset Password**: + +reset password button +Enter your current and new password (and confirm it) in the dialog box that is displayed, and click **Reset Password**. +You will automatically be logged out of the Dashboard and will have to enter your username and new password to log back in. + +## Manage Tyk Dashboard User Groups + +Tyk has a flexible [user permissions](/api-management/user-management#user-permissions) system that provides Role Based Access Control (RBAC) for your Tyk Dashboard. + +When you have a large number of users and teams with different access requirements, instead of setting permissions per *user*, you can create a *user group* and configure the permissions for all users in the group. For example, if you only want certain *users* to access the Tyk Logs, you could create a "Logs Users" *user group*, then give those users the *Logs Read* permission and add them to your *Logs Users* group. + +Note that **a user can only belong to one group**. + +You must have either *admin* or *user groups* permission to be able to modify user groups. + +This also works for Single Sign-On (SSO), as you can specify the user group ID when setting up SSO. + + + +The availability of this feature depends on your license. +
+For further information, please check our [price comparison](https://tyk.io/price-comparison/) or consult our sales and expert engineers: + +
+ + + +### Using Dashboard UI + +1. **Select "User Groups" from the "System Management" section** + + User group menu + +2. **Click "ADD NEW USER GROUP"** + + Add user group location + +3. **Add User Group Name** + + Enter the name for your User Group, and an optional Description. + + Add name + +4. **Set User Group Permissions** + + Select the User Group Permissions you want to apply. + + Add permissions + +5. **Click "Save" to create the Group** + + Click Save + +6. **Add Users to your Group** + + 1. From the **Users** menu, select **Edit** from the **Actions** drop-down list for a user to add to the group. + 2. Select your group from the **User group** drop-down list. + + select user group + + Click Update to save the User details + + update user + +### Using Dashboard API + +You can also manage User Groups via our [Dashboard API](/api-management/dashboard-configuration#user-groups-api). The following functions are available: + +* [List all User Groups](/api-management/dashboard-configuration#list-user-groups) +* [Get a User Group via the User Group ID](/api-management/dashboard-configuration#get-user-group) +* [Add a User Group](/api-management/dashboard-configuration#add-user-group) +* [Update a User Group](/api-management/dashboard-configuration#update-user-group) +* [Delete a User Group](/api-management/dashboard-configuration#delete-user-group) + +## Search Users + +You can search for a user (by email address) by entering the address in the search field. The user list will automatically refresh with that user being displayed. + +User Profile Search + +## Password Policy + +Tyk allows you to control password requirements for Dashboard users, developers (i.e. users registered to the developer portal) and basic auth keys. +Please note: This configuration is enforced by the Tyk-Dashboard and as such is not available in the Tyk Open Source Edition. 
Also, since it requires access to the Tyk Dashboard installation folder, it is *currently* not available for Tyk Cloud clients. + +There are other security options available from the Dashboard config file. See the [security section](/tyk-dashboard/configuration#security) for more details. + +You can find the configuration files in the `schemas` directory of your Tyk Dashboard installation folder, as follows: +- For Dashboard users you define policy in `schemas/password.json` +- For developers you define policy in `schemas/developer_password.json` +- For basic auth keys you define policy in `./schemas/basic_auth.json` + + +The following validators are available: + +* `minLength` - sets minimum password length +* `multiCase` - boolean, upper and lower case characters are required +* `minNumeric` - minimum number of numeric characters +* `minSpecial` - minimum number of special characters, like `@`, `$`, `%` etc. +* `disableSequential` - boolean, disable passwords which include at least 3 sequential characters. For example: `abc`, `123`, `111`, `xxx` etc. + +Below is an example of `password.json` file, with all options turned on: + +```{.copyWrapper} +{ + "title": "User password schema", + "type": "string", + + "minLength": 6, + "multiCase": true, + "minNumeric": 2, + "minSpecial": 2, + "disableSequential": true +} +``` + +## User Permissions + +The Tyk Dashboard is multi-tenant capable and allows granular, role based user access. Users can be assigned specific permissions to ensure that they only have very specific access to the Dashboard pages, and to the underlying API. + +It is important to note that all user roles are defined and enforced **at the Dashboard API level**, and the UI is merely reactive. + +### Admin Users + +An *admin* user has read and write access to all properties. The initial user created during the dashboard's bootstrapping process is automatically assigned the *admin* role. 
+ +There are two configuration parameters that restrict the admin user’s capabilities. For enhanced security, both of these values should be set to `true`: + +- [security.forbid_admin_view_access_token](/tyk-dashboard/configuration#securityforbid_admin_view_access_token): This parameter restricts admin users from viewing other users' Dashboard API Access Credentials, both in the API and the UI. + +- [security.forbid_admin_reset_access_token](/tyk-dashboard/configuration#securityforbid_admin_reset_access_token): This parameter prevents admin users from resetting the access tokens of other users. +### User permissions in the Tyk Dashboard API +The permissions object, which is provided to the Dashboard API, has this structure: + +```json +"user_permissions": { + "IsAdmin": "false", + "analytics": "read", + "apis": "write", + "hooks": "write", + "idm": "write", + "keys": "write", + "policy": "write", + "portal": "write", + "system": "write", + "users": "write", + "user_groups": "write", + "audit_logs": "read" + } +``` + +Note that the above list may not be complete as more features and flexibility are added to the Tyk Dashboard. + +The way the permissions object works is that: + - if it contains `"IsAdmin":"true"`, the user is an *admin* + - if it contains no properties, the user is assumed to be an *admin* + - if it contains even just one property, it acts as an allow-list: only the listed properties are allowed + - any non-listed properties are denied + - permissible values for each section (other than `IsAdmin`) are: `read` or `write`; to deny access to a property you must remove the property from the `user_permissions` object + +An *admin* user can be identified either by setting `IsAdmin` to `true` or by setting no properties in the `user_permissions` object. 
+ +### User permissions in the Tyk Dashboard UI + +User permissions are configured in the user detail view: + +Admin account + +The configuration of each property will affect the dashboard navigation, with `denied` sections or screens hidden or disabled. Note that some side-effects can occur if pages that make use of multiple APIs to fetch configuration data cross over e.g. policies and API Definition listings. + +Selecting the **Account is Admin** checkbox from the Dashboard gives the user full access (it has the same [effect](/api-management/user-management#admin-users) as the `IsAdmin` property). + +### Custom User Permissions + +You can create your own custom permissions for use with the [Open Policy Agent (OPA)](/api-management/dashboard-configuration#extend-permissions-using-open-policy-agent-opa) using the [Additional Permissions](/api-management/dashboard-configuration#additional-permissions-api) endpoint in the Tyk Dashboard Admin API. This allows you to add and delete (CRUD) a list of additional (custom) permissions for your Dashboard users. Once created, a custom permission will be added to the standard list of user permissions. + +You can also configure these custom permissions in the `security.additional_permissions` [map](/tyk-dashboard/configuration#securityadditional_permissions) in the Tyk Dashboard configuration file. + + +## API Ownership + +API Ownership is the concept of Role Based Access Control applied to the portfolio of APIs deployed on your Tyk Gateways and managed from your Tyk Dashboard. + +If you have multiple teams, where each team maintains its own APIs, you may want to limit dashboard access to the team level. For each API, you can assign owners, where an owner can be either an individual user or user group. Only owners have access to these APIs, and objects created based on them like policies or analytics. 
+ +### Multi-team setup using API Ownership + + + +The availability of this feature [depends on your license](/api-management/user-management#). + + + +### When to use API Ownership +#### Governing a multi-team API portfolio +API ownership is a key capability when you have multiple API development teams each working on their own suite of APIs. You can use API Ownership to simplify the experience of those developers when accessing Tyk by reducing the "clutter" of APIs that they are not working with, and also to avoid the risk of users accidentally or maliciously interfering with another team's APIs. + +#### Avoiding data leakage between users +The [user owned analytics](/api-management/user-management#owned-analytics) feature allows you to prevent users from seeing the traffic to (and existence of) APIs for which they are not responsible. This reduces the opportunity for data leakage within your business. + +### How API Ownership works +By default, APIs and associated objects (such as policies and Dashboard analytics) are visible to all Tyk Dashboard users. + +A Dashboard User (or User Group) can be assigned as the *owner* of an API, granting that user (or user group) exclusive visibility of and - given appropriate permissions - the ability to manage all the Tyk objects relating to that API, such as policies, key requests and Dashboard analytics. + +Once an API has been assigned an *owner*, all non-owning users will lose visibility of - and access to - that API in the Tyk Dashboard. + +Where there is a conflict, for example when a policy is associated with multiple APIs and the user owns only one of those APIs, the user will have access to the object (though not the other APIs themselves). + +#### API Ownership example +Imagine that you have two APIs: API1, API2. 
+You have three teams which have different roles and responsibilities as follows: +- **TeamA** which must have access to configure API1 +- **TeamB** which must have access to configure API2 +- **TeamAnalytics** which should only have access to view the analytics for both API1 and API2 + +To implement this structure, you would create three user groups and assign [permissions](/api-management/user-management#user-permissions) as indicated: +- **TeamA** requires `"apis": "write"` +- **TeamB** requires `"apis": "write"` +- **TeamAnalytics** requires `"analytics": "read"` + +Having configured the user groups, you can then assign API ownership as follows: +- API1 - **TeamA**, **TeamAnalytics** +- API2 - **TeamB**, **TeamAnalytics** + +Thus: +**TeamA** will have API `write` access only to API1 +**TeamB** will have API `write` access only to API2 +**TeamAnalytics** will have Analytics `read` access to both APIs + +#### Enabling API Ownership +API Ownership must be enabled in your Tyk Dashboard configuration, which you can do using either of the following approaches: + - set `enable_ownership` to `true` in your `tyk_analytics.conf` + - set the `TYK_DB_ENABLEOWNERSHIP` environment variable to `true` + +#### Owned Analytics +Access to Tyk Dashboard's [traffic analytics](/api-management/dashboard-configuration#traffic-analytics) is controlled via the `analytics` permission in the user or user group access control configuration. The default behavior of this control is to grant or restrict access to all traffic analytics and does not take into account API ownership. + +The additional `owned_analytics` permission was added in Tyk Dashboard v5.1 (and LTS patches v4.0.14 and v5.0.3) to provide more granular access to traffic analytics. By configuring this permission, the user (or user group) will gain visibility only of those analytics that can be filtered by API (due to the method Tyk Pump uses to aggregate the analytics records). 
+ +Currently, only [API Usage](/api-management/dashboard-configuration#activity-by-api) and [Error Counts](/api-management/dashboard-configuration#activity-by-error) are available to users with the `owned_analytics` permission. + +Note that the `owned_analytics` permission depends upon the `analytics` permission being granted (set to `read`) - without this, the more granular control is ignored and the user will not have visibility of any Tyk Dashboard analytics. + +In the Dashboard UI, the control for `owned_analytics` is implemented as a drop-down option (`all` or `owned`) on the `analytics` permission. +Permissions with API Ownership + +### Managing API owners using the Dashboard UI +The Dashboard UI provides a simple method for managing *ownership* for your APIs, where you can select from the lists of users and user groups that have been created in the Dashboard. Users and user groups are managed in separate lists for ease of use. + +#### Using Tyk OAS APIs +When using Tyk OAS APIs, the option to assign owner(s) to an API is provided on the **Access** tab in the API Designer. You simply select the owner (or owners) that you wish to assign to the API from the drop-down boxes: +API ownership section for Tyk OAS APIs + +You can remove owners from the API by clicking on the `x` next to their name in the drop-down/selection box. + +#### Using Tyk Classic APIs +When using Tyk Classic APIs, the option to assign owner(s) to an API is provided on the **Core Settings** tab in the API Designer. You simply select the owner (or owners) that you wish to assign to the API from the drop-down boxes: +API ownership section for Tyk Classic APIs + +You can remove owners from the API by deselecting them from the drop-down. + +### Managing API owners using the Dashboard API +The [Tyk Dashboard API](/tyk-dashboard-api) provides endpoints to manage API ownership directly, if you are not using the API Designer. 
+ +#### Using Tyk OAS APIs +When working with Tyk OAS APIs, you can manage owners for an API using these endpoints: + +| Method | Endpoint path | Action | +| :-------- | :------------------------- | :---------------------------------------------------------------------------------------- | +| `PUT` | `/api/apis/{apiID}/access` | Assign a list of owners to the specified API | +| `GET` | `/api/apis/{apiID}/access` | Retrieve the list of owners of the specified API | + +For each of these endpoints, the payload consists of two string lists: one for user IDs, the other for user group IDs. +```json +{ + "userIds": [ + "string" + ], + "userGroupIds": [ + "string" + ] +} +``` + +#### Using Tyk Classic APIs +When working with Tyk Classic APIs, you manage owners for an API by modifying the `user_owners` and `user_group_owners` fields in the API definition and then updating the API in Tyk with that using these endpoints: + +| Method | Endpoint | Action | +| :-------- | :--------------------- | :----------------------------------------------------------------------------------------------------------------------- | +| `PUT` | `/api/apis/{apiID}` | Update the API definition for the specified API - CRUD API owners in the `user_owners` and `user_group_owners` fields | +| `GET` | `/api/apis/{apiID}` | Retrieve the API definition for the specified API - ownership details are included in `user_owners` and `user_group_owners` fields | + +## Manage Tyk Dashboard Users in Multiple Organizations + +If you have deployed multiple [Tyk Organizations](/api-management/dashboard-configuration#organizations), you may have users that need access to more than one Organization (known as a "multi-org user"). 
**This functionality requires a specific Tyk license.** + +To support multi-org users, you must first enable the feature in your Dashboard configuration by setting either of the following to `true`: + - `"enable_multi_org_users"` in `tyk_analytics.conf` + - `TYK_DB_ENABLEMULTIORGUSERS` environment variable + +You then must create users in both Organizations with identical credentials. + +During the login flow the user will see an additional page asking them to pick which available Organization they wish to log into. Once logged in, the user will have an additional drop-down in the top right navigation menu allowing them to switch between Organizations quickly. + + + +A user that does not belong to an Organization is sometimes referred to as an *unbounded user*. These users have visibility across all Organizations, but should be granted read-only access. + + + +## Single Sign-On integration +You can integrate your existing identity management server with the Tyk Dashboard, as explained in our detailed [Single Sign-On (SSO) guide](/api-management/external-service-integration#single-sign-on-sso). **This functionality is available with all Tyk licenses except Tyk Classic Cloud.** + +By default all users who login via SSO are granted admin permissions. You can change this behavior by setting either default permissions for *[users](/api-management/user-management#manage-tyk-dashboard-users)* or by creating a default *[user group](/api-management/user-management#manage-tyk-dashboard-user-groups)* to which all new users are assigned. With some IDPs you can automatically assign different SSO users to different *user groups* by dynamically mapping the IDP's user groups, for example with [Azure AD](/api-management/single-sign-on-oidc#user-group-mapping). 
+ +If you want to maintain an individual set of permissions for your SSO users, you must first enable SSO user lookup in your Dashboard configuration by setting either of the following to `true`: + - `"sso_enable_user_lookup"` in `tyk_analytics.conf` + - `TYK_DB_SSOENABLEUSERLOOKUP` environment variable + +You must then create a *user* in the Dashboard with the required permissions and matching email address. During the SSO login flow, if a user with the same email address is found in the existing organization, their permissions are applied. diff --git a/apim.mdx b/apim.mdx new file mode 100644 index 000000000..99c617975 --- /dev/null +++ b/apim.mdx @@ -0,0 +1,53 @@ +--- +title: "Tyk API Management Deployment Options" +description: "How to decide on which Tyk deployment option is best for you" +keywords: "Tyk API Management, Licensing, Open Source, Self-Managed, Tyk Cloud, API Gateway" +sidebarTitle: "Deployment Options" +--- + +import { ButtonLeft } from '/snippets/ButtonLeft.mdx'; +import SelfManagedLicensingInclude from '/snippets/self-managed-licensing-include.mdx'; + +Tyk API Platform offers various deployment options, consisting of both [open source and proprietary](/tyk-stack) +components. + +Choosing the right one for your organization depends on your specific requirements and preferences. +
Don’t hesitate to contact us for assistance + +| | [Open Source](/tyk-open-source) | [Self-Managed](/tyk-self-managed/install) | [Cloud](https://account.cloud-ara.tyk.io/signup) +|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|-------------------|--------- +| API Gateway Capabilities
  • Rate Limiting
  • Authentication
  • API Versioning
  • Granular Access Control
  • GraphQL
  • and [much more](/tyk-open-source)
| βœ… |βœ… |βœ… +| [Version Control](/api-management/automations/sync) Integration | - |βœ… |βœ… +| [API Analytics Exporter](/api-management/tyk-pump) | βœ… |βœ… |βœ… +| [Tyk Dashboard](/api-management/dashboard-configuration) | - |βœ… |βœ… +| [Single Sign On (SSO)](/api-management/external-service-integration#single-sign-on-sso) | - |βœ… |βœ… +| [RBAC and API Teams](/api-management/user-management#) | - |βœ… |βœ… +| [Universal Data Graph](/api-management/data-graph#overview) | - |βœ… |βœ… +| [Multi-Tenant](/api-management/dashboard-configuration#organizations) | - |βœ… |βœ… +| [Multi-Data Center](/api-management/mdcb#managing-geographically-distributed-gateways-to-minimize-latency-and-protect-data-sovereignty) | - |βœ… |βœ… +| [Developer Portal](/portal/overview/intro) | - |βœ… |βœ… +| [Developer API Analytics](/api-management/dashboard-configuration#traffic-analytics) | - |βœ… |βœ… +| Hybrid Deployments | - |- |βœ… +| Fully-Managed SaaS | - |- |βœ… +| [HIPAA, SOC2, PCI](https://tyk.io/governance-and-auditing/) | βœ… |βœ… | - + + +## Licensing + +### Self-managed (On-Prem) + + + +### Cloud (Software as a Service / SaaS) + +Tyk cloud is a fully managed service that makes it easy for API teams to create, secure, publish and maintain APIs at any scale, anywhere in the world. Tyk Cloud includes everything you need to manage your global API ecosystem. + +Get your free account [here](https://tyk.io/sign-up/). + +### Open Source (OSS) + +The Tyk Gateway is the backbone of all our solutions and can be deployed for free, forever. It offers various [installation options](/apim/open-source/installation) to suit different needs. + +Visit the [OSS section](/tyk-open-source) for more information on it and other open source components. + +Explore the various open and closed source [Tyk components](/tyk-stack) that are part of the Tyk platform solutions. 
diff --git a/apim/open-source/installation.mdx b/apim/open-source/installation.mdx new file mode 100644 index 000000000..0cd3df8c7 --- /dev/null +++ b/apim/open-source/installation.mdx @@ -0,0 +1,742 @@ +--- +title: "Installation Options for Tyk Gateway" +description: "This page serves as a comprehensive guide to installing Tyk Gateway Open Source" +keywords: "installation, migration, open source" +sidebarTitle: "Installation Options" +--- + +import { ResponsiveGrid } from '/snippets/ResponsiveGrid.mdx'; + +## Introduction + +The backbone of all our products is our open source Gateway. You can install our Open Source / Community Edition on the following platforms: + + + + + +**Read time: 10 mins** + +Install with Docker. + + + +**Read time: 10 mins** + +Install with K8s. + + + +**Read time: 10 mins** + +Install with Ansible. + + + +**Read time: 10 mins** + +Install on RHEL / CentOS. + + + +**Read time: 10 mins** + +Install on Debian / Ubuntu. + + + +**Read time: 10 mins** + +Visit our Gateway GitHub Repo. + + + + + +## Install Tyk Gateway with Docker + +We will show you two methods of installing our Community Edition Gateway on Docker. +The quickest way to get started is using docker-compose. Visit our [Dockerhub](https://hub.docker.com/u/tykio/) to view the official images. + +### Prerequisites + +The following are required for a Tyk OSS installation: + - Redis - Required for all Tyk installations. + Simple Redis installation instructions are included below. + - MongoDB - Required only if you chose to use the Tyk Pump with your Tyk OSS installation. Same goes with any [other pump data stores](/api-management/tyk-pump#external-data-stores) you choose to use. + +### Steps for Installation + +1. **Create a network** + +```bash +docker network create tyk +``` + +2. **Deploy Redis into the network, with the `6379` port open** + +```bash +docker run -itd --rm --name tyk-redis --network tyk -p 127.0.0.1:6379:6379 redis:4.0-alpine +``` + +3. 
**Next, let's download a JSON `tyk.conf` configuration file** + +```bash +wget https://raw.githubusercontent.com/TykTechnologies/tyk-gateway-docker/master/tyk.standalone.conf +``` + +4. **Run the Gateway, mounting the conf file into the container** + +```bash +docker run \ + --name tyk_gateway \ + --network tyk \ + -p 8080:8080 \ + -v $(pwd)/tyk.standalone.conf:/opt/tyk-gateway/tyk.conf \ + -v $(pwd)/apps:/opt/tyk-gateway/apps \ + docker.tyk.io/tyk-gateway/tyk-gateway:latest +``` + + +### Test Installation + +Your Tyk Gateway is now configured and ready to use. Confirm this by making a network request to the 'hello' endpoint: + +```curl +curl localhost:8080/hello +``` + +Output should be similar to that shown below: +```json +{"status":"pass","version":"v3.2.1","description":"Tyk GW"} +``` + + +## Install Tyk Gateway with Kubernetes + +The main way to install the Open Source *Tyk Gateway* in a Kubernetes cluster is via Helm charts. +We are actively working to add flexibility and more user flows to our chart. Please reach out +to our teams on support or the community forum if you have questions, requests or suggestions for improvements. + +Get started with our [Quick Start guide](#quick-start-with-helm-chart) or go to [Tyk Open Source helm chart](/product-stack/tyk-charts/tyk-oss-chart) for detailed installation instructions and configuration options. + +### Quick Start with Helm Chart + +At the end of this quick start, Tyk Gateway should be accessible through the service `gateway-svc-tyk-oss-tyk-gateway` at port `8080`. +The following guides provide instructions to install Redis and Tyk Open Source with default configurations. It is intended for a quick start only. For production, you should install and configure Redis separately. + +#### Prerequisites + +1. [Kubernetes 1.19+](https://kubernetes.io/docs/setup/) +2. [Helm 3+](https://helm.sh/docs/intro/install/) + +#### Steps for Installation + +1. 
**Install Redis and Tyk** + +```bash +NAMESPACE=tyk-oss +APISecret=foo +REDIS_BITNAMI_CHART_VERSION=19.0.2 + +helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ +helm repo update + +helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --create-namespace --install --version $REDIS_BITNAMI_CHART_VERSION + +helm upgrade tyk-oss tyk-helm/tyk-oss -n $NAMESPACE --create-namespace \ + --install \ + --set global.secrets.APISecret="$APISecret" \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc.cluster.local:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password +``` + +2. **Done!** + +Now Tyk Gateway should be accessible through service `gateway-svc-tyk-oss-tyk-gateway` at port `8080`. + +You are now ready to [create an API](/api-management/gateway-config-managing-classic#create-an-api). + +For the complete installation guide and configuration options, please see [Tyk OSS Helm Chart](/product-stack/tyk-charts/tyk-oss-chart). + +### Configure Legacy Tyk Headless Helm Chart + + +`tyk-headless` chart is deprecated. Please use our Tyk Chart for Tyk Open Source at [tyk-oss](#quick-start-with-helm-chart) instead. + +We recommend all users migrate to the `tyk-oss` Chart. Please review the [Configuration](#quick-start-with-helm-chart) section of the new helm chart and cross-check with your existing configurations while planning for migration. + + + +This is the preferred (and easiest) way to install the Tyk OSS Gateway on Kubernetes. +It will install Tyk gateway in your Kubernetes cluster where you can add and manage APIs directly or via the *Tyk Operator*. + +#### Prerequisites + +The following are required for a Tyk OSS installation: +1. Redis - required for all the Tyk installations and must be installed in the cluster or reachable from inside K8s. + You can find instructions for a simple Redis installation below. +2. 
MongoDB/SQL - Required only if you choose to use the MongoDB/SQL Tyk pump with your Tyk OSS installation. The same goes for any + [other pump](/api-management/tyk-pump#external-data-stores) you choose to use. +3. Helm - Tyk Helm supports the Helm 3+ version. + +#### Steps for Installation + +As well as our official OSS Helm repo, you can also find it in [ArtifactHub](https://artifacthub.io/packages/helm/tyk-helm/tyk-headless). +[Open in ArtifactHub](https://artifacthub.io/packages/helm/tyk-helm/tyk-headless) + +If you are interested in contributing to our charts, suggesting changes, creating PRs, or any other way, +please use [GitHub Tyk-helm-chart repo](https://github.com/TykTechnologies/tyk-helm-chart/tree/master/tyk-headless) + +1. **Add Tyk official Helm repo** + +```bash +helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ +helm repo update +``` + +2. **Create a namespace for Tyk deployment** + +```bash +kubectl create namespace tyk +``` + +3. **Getting values.yaml** + +Before we proceed with installation of the chart you may need to set some custom values. +To see what options are configurable on a chart and save those options to a custom `values.yaml` file run: + +```bash +helm show values tyk-helm/tyk-headless > values.yaml +``` + +Some of the necessary configuration parameters will be explained in the next steps. + +4. **Installing Redis** + +* Recommended: via *Bitnami* chart - For Redis, you can use this rather excellent chart provided by Bitnami. +Copy the following commands to add it: + + ```bash + helm repo add bitnami https://charts.bitnami.com/bitnami + helm install tyk-redis bitnami/redis -n tyk --version 19.0.2 + ``` + + + +Please make sure you are installing Redis versions that are supported by Tyk. Please refer to Tyk docs to get a list of [supported versions](/tyk-configuration-reference/redis-cluster-sentinel#supported-versions). + + + +Follow the notes from the installation output to get connection details and password. 
+ +``` + Redis(TM) can be accessed on the following DNS names from within your cluster: + + tyk-redis-master.tyk.svc.cluster.local for read/write operations (port 6379) + tyk-redis-replicas.tyk.svc.cluster.local for read-only operations (port 6379) + + export REDIS_PASSWORD=$(kubectl get secret --namespace tyk tyk-redis -o jsonpath="{.data.redis-password}" | base64 --decode) +``` + +The DNS name of your Redis as set by Bitnami is `tyk-redis-master.tyk.svc.cluster.local:6379` +You can update them in your local `values.yaml` file under `redis.addrs` and `redis.pass` +Alternatively, you can use `--set` flag to set it in the Tyk installation. For example `--set redis.pass=$REDIS_PASSWORD` + +**For evaluation only: Use *simple-redis* chart** + + + +Another option for Redis, to get started quickly, is to use our *simple-redis* chart. +Please note that these provided charts must never be used in production or for anything +but a quick start evaluation only. Use Bitnami Redis or Official Redis Helm chart in any other case. +We provide this chart, so you can quickly deploy *Tyk gateway*, but it is not meant for long-term storage of data. + + + +```bash +helm install redis tyk-helm/simple-redis -n tyk +``` + +5. **Installing Tyk Open Source Gateway** + +```bash +helm install tyk-ce tyk-helm/tyk-headless -f values.yaml -n tyk + ``` + +Please note that by default, Gateway runs as `Deployment` with `ReplicaCount` as 1. You should not update this part because multiple instances of OSS gateways won't sync the API Definition. + +#### Installation Video + +See our short video on how to install the Tyk Open Source Gateway. +Please note that this video shows the use of the Github repository since it was recorded before the official repo was available, However, +it's very similar to the above commands. + + + +#### Pump Installation +By default pump installation is disabled. You can enable it by setting `pump.enabled` to `true` in `values.yaml` file. 
+Alternatively, you can use `--set pump.enabled=true` while doing Helm install. + +**Quick Pump configuration(Supported from tyk helm v0.10.0)** +*1. Mongo Pump* + +To configure the Mongo pump, make the following changes in `values.yaml` file: +1. Set `backend` to `mongo`. +2. Set connection string in `mongo.mongoURL`. + +*2. Postgres Pump* + +To configure the Postgres pump, make the following changes in `values.yaml` file: +1. Set `backend` to `postgres`. +2. Set connection string parameters in `postgres` section. + +#### Optional - Using TLS +You can turn on the TLS option under the gateway section in your local `values.yaml` file which will make your Gateway +listen on port 443 and load up a dummy certificate. +You can set your own default certificate by replacing the file in the `certs/` folder. + +#### Optional - Mounting Files +To mount files to any of the Tyk stack components, add the following to the mounts array in the section of that component. + +For example: + ```bash + - name: aws-mongo-ssl-cert + filename: rds-combined-ca-bundle.pem + mountPath: /etc/certs +``` + +#### Optional - Tyk Ingress +To set up an ingress for your Tyk Gateways see our [Tyk Operator GitHub repository](https://github.com/TykTechnologies/tyk-operator). + + +## Install Tyk Gateway with Ansible + +### Prerequisites + +1. [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) is required to run the following commands. +2. Ensure port `8080` is open: this is used in this guide for Gateway traffic (the API traffic to be proxied). + +### Steps for Installation +1. Clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run the init script to initialize the environment + +```bash +$ sh scripts/init.sh +``` + +4. 
Modify the `hosts.yml` file to update SSH variables to your server(s). For more information about the host file, visit the [Ansible inventory documentation] (https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install `tyk-ce` + +```bash +$ ansible-playbook playbook.yaml -t tyk-ce -t redis +``` + +You can choose to not install Redis by removing the `-t redis`. However, Redis is a requirement and needs to be installed for the gateway to run. + +### Supported Distributions +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Amazon Linux | 2 | βœ… | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| Debian | 10 | βœ… | +| Debian | 9 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | +| Ubuntu | 21 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +### Variables +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| redis.host | | Redis server host if different than the host url | +| redis.port | `6379` | Redis server listening port | +| redis.pass | | Redis server password | +| redis.enableCluster | `false` | Enable if Redis is running in cluster mode | +| redis.storage.database | `0` | Redis server database | +| redis.tls | `false` | Enable if Redis connection is secured with SSL | +| gateway.service.host | | Gateway server host if different than the host url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. 
If you include a non-filter tag (e.g. an identifier such as `node-id-1`), this will become available to your Dashboard analytics | + +- `vars/redis.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| redis_bind_interface | `0.0.0.0` | Binding address of Redis | + +Read more about Redis configuration [here](https://github.com/geerlingguy/ansible-role-redis). + +## Install Tyk Gateway with Ubuntu + +The Tyk Gateway can be installed following different installation methods including *Ansible* and *Shell*. Please select by clicking the tab with the installation path most suitable for you. + +### Install Tyk Gateway On Ubuntu Through Shell + +#### Supported Distributions + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Debian | 11 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +#### Prerequisites + +1. Ensure port `8080` is open: this is used in this guide for Gateway traffic (the API traffic to be proxied). + +#### Steps for Installation + +1. **Install Redis** + +```console +$ sudo apt-get install -y redis-server +``` + +2. **First import the public key as required by Ubuntu APT** + +```console +$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927 +``` + +3. **Run Installation Scripts via our PackageCloud Repositories** + +From [https://packagecloud.io/tyk/tyk-gateway](https://packagecloud.io/tyk/tyk-gateway) you have the following options: + +* Via the correct package for your Ubuntu version. We have packages for the following: + * Xenial + * Trusty + * Precise + +* Via Quick Installation Instructions. You can use: + * [Manual Instructions](https://packagecloud.io/tyk/tyk-gateway/install#manual-deb) + * [Chef](https://packagecloud.io/tyk/tyk-gateway/install#chef) + * [Puppet](https://packagecloud.io/tyk/tyk-gateway/install#puppet) + * [CI and Build Tools](https://packagecloud.io/tyk/tyk-gateway/ci) + +4. 
**Configure The Gateway** + +You can set up the core settings for the Tyk Gateway with a single setup script, however for more involved deployments, you will want to provide your own configuration file. + + + +You need to replace `` for `--redishost=` with your own value to run this script. + + + + +```console +$ sudo /opt/tyk-gateway/install/setup.sh --listenport=8080 --redishost= --redisport=6379 --domain="" +``` + +What you've done here is tell the setup script that: + +* `--listenport=8080`: Listen on port `8080` for API traffic. +* `--redishost=`: The hostname for Redis. +* `--redisport=6379`: Use port `6379` for Redis. +* `--domain=""`: Do not filter domains for the Gateway, see the note on domains below for more about this. + +In this example, you don't want Tyk to listen on a single domain. It is recommended to leave the Tyk Gateway domain unbounded for flexibility and ease of deployment. + +5. **Starting Tyk** + +The Tyk Gateway can be started now that it is configured. Use this command to start the Tyk Gateway: +```console +$ sudo service tyk-gateway start +``` + +### Install Tyk Gateway On Ubuntu Through Ansible + +#### Supported Distributions + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Debian | 11 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +#### Prerequisites + +Before you begin the installation process, make sure you have the following: +- [Git](https://git-scm.com/download/linux) - required for getting the installation files. +- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) is required to run the following commands. +- Ensure port `8080` is open: this is used in this guide for Gateway traffic (the API traffic to be proxied). + +#### Steps for Installation + +1. 
**Clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository** + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. **`cd` into the directory** +```bash +$ cd tyk-ansible +``` + +3. **Run the initialisation script to initialise the environment** + +```bash +$ sh scripts/init.sh +``` + +4. Modify the `hosts.yml` file to update SSH variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. **Run ansible-playbook to install `tyk-gateway-ce`** + +```bash +$ ansible-playbook playbook.yaml -t tyk-gateway-ce -t redis +``` + + +Installation flavors can be specified by using the -t {tag} at the end of the ansible-playbook command. In this case we are using: +-`tyk-gateway-ce`: Tyk Gateway with CE config +-`redis`: Redis database as Tyk Gateway dependency + + + +#### Variables + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| redis.host | | Redis server host if different than the hosts url | +| redis.port | `6379` | Redis server listening port | +| redis.pass | | Redis server password | +| redis.enableCluster | `false` | Enable if redis is running in cluster mode | +| redis.storage.database | `0` | Redis server database | +| redis.tls | `false` | Enable if redis connection is secured with SSL | +| gateway.service.host | | Gateway server host if different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. 
Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | + +- `vars/redis.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| redis_bind_interface | `0.0.0.0` | Binding address of Redis | + +Read more about Redis configuration [here](https://github.com/geerlingguy/ansible-role-redis). + + +## Install Tyk Gateway on Red Hat (RHEL / CentOS) + +The Tyk Gateway can be installed following different installation methods including *Shell* and *Ansible*. Please select by clicking the tab with the installation path most suitable for you. + +### Install Tyk Gateway Through Shell + +#### Supported Distributions + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | + +#### Prerequisites + +Before you begin the installation process, make sure you have the following: + +* Ensure port `8080` is open for Gateway traffic (the API traffic to be proxied). +* The Tyk Gateway has a [dependency](/tyk-configuration-reference/redis-cluster-sentinel#supported-versions) on Redis. Follow the steps provided by Red Hat to make the installation of Redis, conducting a [search](https://access.redhat.com/search/?q=redis) for the correct version and distribution. + +#### Steps for Installation +1. **Create Tyk Gateway Repository Configuration** + +Create a file named `/etc/yum.repos.d/tyk_tyk-gateway.repo` that contains the repository configuration settings for YUM repositories `tyk_tyk-gateway` and `tyk_tyk-gateway-source` used to download packages from the specified URLs. This includes GPG key verification and SSL settings, on a Linux system. 
+ +Make sure to replace `el` and `8` in the config below with your Linux distribution and version: +```bash +[tyk_tyk-gateway] +name=tyk_tyk-gateway +baseurl=https://packagecloud.io/tyk/tyk-gateway/el/8/$basearch +repo_gpgcheck=1 +gpgcheck=0 +enabled=1 +gpgkey=https://packagecloud.io/tyk/tyk-gateway/gpgkey +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt +metadata_expire=300 + +[tyk_tyk-gateway-source] +name=tyk_tyk-gateway-source +baseurl=https://packagecloud.io/tyk/tyk-gateway/el/8/SRPMS +repo_gpgcheck=1 +gpgcheck=0 +enabled=1 +gpgkey=https://packagecloud.io/tyk/tyk-gateway/gpgkey +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt +metadata_expire=300 +``` + +Update your local yum cache by running: +```bash +sudo yum -q makecache -y --disablerepo='*' --enablerepo='tyk_tyk-gateway' +``` + +2. **Install Tyk Gateway** + +Install the Tyk Gateway using yum: +```bash +sudo yum install -y tyk-gateway +``` + + +You may be asked to accept the GPG key for our two repos and when the package installs, hit yes to continue. + + + +3. **Start Redis** + +If Redis is not running then start it using the following command: +```bash +sudo service redis start +``` +4. **Configuring The Gateway** + +You can set up the core settings for the Tyk Gateway with a single setup script, however for more complex deployments you will want to provide your own configuration file. + + + +Replace `` in `--redishost=` with your own value to run this script. + + + +```bash +sudo /opt/tyk-gateway/install/setup.sh --listenport=8080 --redishost= --redisport=6379 --domain="" +``` + +What you've done here is told the setup script that: + +* `--listenport=8080`: Listen on port `8080` for API traffic. +* `--redishost=`: The hostname for Redis. +* `--redisport=6379`: Use port `6379` for Redis. +* `--domain=""`: Do not filter domains for the Gateway, see the note on domains below for more about this. + +In this example, you don't want Tyk to listen on a single domain. 
It is recommended to leave the Tyk Gateway domain unbounded for flexibility and ease of deployment. + +5. **Start the Tyk Gateway** + +The Tyk Gateway can be started now that it is configured. Use this command to start the Tyk Gateway: +```bash +sudo service tyk-gateway start +``` + +### Install Tyk Gateway Through Ansible + +#### Supported Distributions + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | + +#### Prerequisites +Before you begin the installation process, make sure you have the following: + +1. [Git](https://git-scm.com/download/linux) - required for getting the installation files. +2. [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - required for running the commands below. +3. Ensure port `8080` is open: this is used in this guide for Gateway traffic (the API traffic to be proxied). + +#### Steps for Installation + +1. **Clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository** + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. **`cd` into the directory** +```bash +$ cd tyk-ansible +``` + +3. **Run the initalisation script to initialise your environment** + +```bash +$ sh scripts/init.sh +``` + +4. Modify the `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. **Run ansible-playbook to install `tyk-gateway-ce`** + +```bash +$ ansible-playbook playbook.yaml -t tyk-gateway-ce -t redis +``` + + +Installation flavors can be specified by using the -t {tag} at the end of the ansible-playbook command. 
In this case we are using: + -`tyk-gateway-ce`: Tyk Gateway with CE config + -`redis`: Redis database as Tyk Gateway dependency + + + +#### Variables +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| redis.host | | Redis server host if different than the hosts url | +| redis.port | `6379` | Redis server listening port | +| redis.pass | | Redis server password | +| redis.enableCluster | `false` | Enable if redis is running in cluster mode | +| redis.storage.database | `0` | Redis server database | +| redis.tls | `false` | Enable if redis connection is secured with SSL | +| gateway.service.host | | Gateway server host if different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | + +- `vars/redis.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| redis_bind_interface | `0.0.0.0` | Binding address of Redis | + +Read more about Redis configuration [here](https://github.com/geerlingguy/ansible-role-redis). + +## Install Tyk Gateway on Killercoda + +[Killercoda](https://killercoda.com/about) gives you instant access to a real Linux or Kubernetes command-line environment via your browser. 
+You can try this [Killercoda Tyk scenario](https://killercoda.com/tyk-tutorials/scenario/Tyk-install-OSS-docker-compose) to walk through the installation of our Open Source Gateway using Docker Compose (the exact same flow shown above). + diff --git a/basic-config-and-security/security/authentication-authorization/hmac-signatures.mdx b/basic-config-and-security/security/authentication-authorization/hmac-signatures.mdx new file mode 100644 index 000000000..1d227294d --- /dev/null +++ b/basic-config-and-security/security/authentication-authorization/hmac-signatures.mdx @@ -0,0 +1,147 @@ +--- +title: "Sign Requests with HMAC" +description: "How to configure HMAC Signatures in Tyk" +keywords: "Authentication, HMAC" +sidebarTitle: "HMAC Signatures" +--- + +## Introduction + +Hash-Based Message Authentication Code (HMAC) Signing is an access token method that adds another level of security by forcing the requesting client to also send along a signature that identifies the request temporally to ensure that the request is from the requesting user, using a secret key that is never broadcast over the wire. + +Tyk currently implements the latest draft of the [HMAC Request Signing standard](http://tools.ietf.org/html/draft-cavage-http-signatures-05). + +HMAC Signing is a good way to secure an API if message reliability is paramount, it goes without saying that all requests should go via TLS/SSL to ensure that MITM attacks can be minimized. There are many ways of managing HMAC, and because of the additional encryption processing overhead requests will be marginally slower than more standard access methods. 
+ +An HMAC signature is essentially some additional data sent along with a request to identify the end-user using a hashed value, in our case we encode the 'date' header of a request, the algorithm would look like: + +``` +Base64Encode(HMAC-SHA1("date: Mon, 02 Jan 2006 15:04:05 MST", secret_key)) +``` + +The full request header for an HMAC request uses the standard `Authorization` header, and uses set, stripped comma-delimited fields to identify the user, from the draft proposal: + +``` +Authorization: Signature keyId="hmac-key-1",algorithm="hmac-sha1",signature="Base64Encode(HMAC-SHA1(signing string))" +``` + +Tyk supports the following HMAC algorithms: "hmac-sha1", "hmac-sha256", "hmac-sha384", "hmac-sha512", and reads the value from the algorithm header. You can limit the allowed algorithms by setting the `hmac.allowedAlgorithms` (Tyk Classic: `hmac_allowed_algorithms`) field in your API definition, like this: `"hmac_allowed_algorithms": ["hmac-sha256", "hmac-sha512"]`. + +The date format for an encoded string is: + +``` +Mon, 02 Jan 2006 15:04:05 MST +``` + +This is the standard for most browsers, but it is worth noting that requests will fail if they do not use the above format. + +## How Tyk validates the signature of incoming requests + +When an HMAC-signed request comes into Tyk, the key is extracted from the `Authorization` header, and retrieved from Redis. If a key exists then Tyk will generate its own signature based on the request's "date" header, if this generated signature matches the signature in the `Authorization` header the request is passed. + +### Supported headers + +Tyk API Gateway supports full header signing through the use of the `headers` HMAC signature field. This includes the request method and path using the `(request-target)` value. For body signature verification, HTTP Digest headers should be included in the request and in the header field value. + + + +All headers should be in lowercase.
+ + + +#### Date header not allowed for legacy .Net + +Older versions of some programming frameworks do not allow the Date header to be set, which can cause problems with implementing HMAC, therefore, if Tyk detects an `x-aux-date` header, it will use this to replace the Date header. + +### Clock Skew + +Tyk also implements the recommended clock-skew from the specification to protect against replay attacks, a minimum lag of 300ms is allowed on either side of the date stamp, any more or less and the request will be rejected. This means that requesting machines need to be synchronised with NTP if possible. + +You can edit the length of the clock skew in the API Definition by setting the `hmac.allowedClockSkew` (Tyk Classic: `hmac_allowed_clock_skew`) value. This value will default to 0, which deactivates clock skew checks. + +## Setting up HMAC using the Dashboard + +To enable the use of HMAC Signing in your API from the Dashboard: + +1. Scroll to the **Authentication** options +2. Select **HMAC (Signed Authentication Key)** from the drop-down list +3. Configure your **HMAC Request Signing** settings. +4. Select **Strip Authorization Data** to strip any authorization data from your API requests. +5. Select the location of the signature in the request. + +Configuring HMAC request signing + +## Configuring your API to use HMAC Request Signing + +HMAC request signing is configured within the Tyk Vendor Extension by adding the `hmac` object within the `server.authentication` section and enabling authentication. + +You must indicate where Tyk should look for the request signature (`header`, `query` or `cookie`) and which `algorithm` will be used to encrypt the secret to create the signature. You can also optionally configure a limit for the `allowedClockSkew` between the timestamp in the signature and the current time as measured by Tyk.
+ +```yaml +x-tyk-api-gateway: + server: + authentication: + enabled: true + hmac: + enabled: true + header: + enabled: true + name: Authorization + allowedAlgorithms: + - hmac-sha256 + allowedClockSkew: -1 +``` + +Note that URL query parameter keys and cookie names are case sensitive, whereas header names are case insensitive. + +You can optionally [strip the auth token](/api-management/client-authentication#managing-authorization-data) from the request prior to proxying to the upstream using the `authentication.stripAuthorizationData` field (Tyk Classic: `strip_auth_data`). + +### Using Tyk Classic + +As noted in the Tyk Classic API [documentation](/api-management/gateway-config-tyk-classic#configuring-authentication-for-tyk-classic-apis), you can select HMAC Request Signing using the `enable_signature_checking` option. + +## Registering an HMAC user with Tyk + +When using HMAC request signing, you need to provide Tyk with sufficient information to verify the client's identity from the signature in the request. You do this by creating and registering HMAC user [session objects](/api-management/policies#what-is-a-session-object) with Tyk. When these are created, a matching HMAC secret is also generated, which must be used by the client when signing their requests. + +The way that this is implemented is through the creation of a key that grants access to the API (as you would for an API protected by [auth token](/api-management/authentication/bearer-token)) and indicating that the key is to be used for HMAC signed requests by setting `hmac_enabled` to `true`. Tyk will return the HMAC secret in the response confirming creation of the key. + +When calling the API, the client would never use the key itself as a token, instead they must sign requests using the provided secret. + +## Generating a signature + +This code snippet gives an example of how a client could construct and generate a Request Signature. + +```{.copyWrapper} +... 
+ +refDate := "Mon, 02 Jan 2006 15:04:05 MST" + +// Prepare the request headers: +tim := time.Now().Format(refDate) +req.Header.Add("Date", tim) +req.Header.Add("X-Test-1", "hello") +req.Header.Add("X-Test-2", "world") + +// Prepare the signature to include those headers: +signatureString := "(request-target): " + "get /your/path/goes/here" + "\n" +signatureString += "date: " + tim + "\n" +signatureString += "x-test-1: " + "hello" + "\n" +signatureString += "x-test-2: " + "world" + +// SHA1 Encode the signature +HmacSecret := "secret-key" +key := []byte(HmacSecret) +h := hmac.New(sha1.New, key) +h.Write([]byte(signatureString)) + +// Base64 and URL Encode the string +sigString := base64.StdEncoding.EncodeToString(h.Sum(nil)) +encodedString := url.QueryEscape(sigString) + +// Add the header +req.Header.Add("Authorization", + fmt.Sprintf("Signature keyId=\"9876\",algorithm=\"hmac-sha1\",headers=\"(request-target) date x-test-1 x-test-2\",signature=\"%s\"", encodedString)) + +... +``` diff --git a/basic-config-and-security/security/authentication-authorization/json-web-tokens.mdx b/basic-config-and-security/security/authentication-authorization/json-web-tokens.mdx new file mode 100644 index 000000000..9d081e8ea --- /dev/null +++ b/basic-config-and-security/security/authentication-authorization/json-web-tokens.mdx @@ -0,0 +1,560 @@ +--- +title: "JSON Web Token (JWT) Authentication" +description: "How to use JWT Authentication with Tyk" +keywords: "Authentication, JWT, JSON Web Tokens" +sidebarTitle: "Overview" +--- + +## Introduction + +JSON Web Token (JWT) is an open standard ([RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519)) that defines a compact and self-contained way for securely transmitting claims between parties as a JSON object. The information in the JSON object is digitally signed using either a secret (with the HMAC algorithm) or a public/private key pair (using RSA or ECDSA encryption) allowing the JWT to be used for client authentication.
+ +### JWT Workflow + +JWTs are commonly used in OAuth 2.0 and OpenID Connect flows to authenticate users and authorize access to APIs. The following diagram illustrates the typical flow of JWT authentication with Tyk Gateway: + +```mermaid +sequenceDiagram + participant User + participant IdP as Identity Provider (IdP) + participant Client as Client Application + participant Tyk as Tyk Gateway + + User->>IdP: Authorizes Client + IdP-->>Client: Issues JWT (with claims) + Client->>Tyk: Sends API request with JWT + Tyk->>IdP: (Optional) Fetch JWKS for validation + Tyk-->>Tyk: Validates JWT Signature + Tyk-->>Tyk: Extracts Claims + Tyk-->>Tyk: Applies Security Policies based on Claims + Tyk-->>Client: Forwards request or responds +``` + +1. **Token Issuance by Identity Provider (IdP):** + The Identity Provider (IdP) issues a JWT with the appropriate claims. These claims represent the permissions granted by the user to the client application. + +2. **Client as Token Bearer:** + The Client Application acts as the *bearer* of the JWT (access token). Although the client may have access to multiple APIs, the user's delegated scopes restrict actual access. For example, a client with access to 5 APIs might only get a token allowing access to 2, depending on the user's delegation. + +3. **JWT Presented to Tyk Gateway:** + The client includes the JWT in its API requests to the Tyk Gateway as an access token. + +4. **JWT Validation via JWK/JWKS:** + Tyk validates the JWT's signature using a JSON Web Key (JWK). This key can be statically configured or dynamically fetched from the IdP via a JSON Web Key Set (JWKS) endpoint, supporting key rotation and multiple keys. + +5. **Authorization via Claims and Policies:** + Claims within the JWT can be used by Tyk Gateway's authorization process to configure rate and quota limits via Tyk's [Security Policy](/api-management/policies) system. 
Within the API definition, JWT claims can be "mapped" onto security policies which will then be applied to the request. + +6. **Stateless Authentication:** + A key advantage of JWT authentication is that Tyk does not store any user credentials or session data. It never sees the user directly - it trusts the authorization server to have authenticated the user and issued a valid token. + +### Key Benefits + +This documentation is focused on how to use Tyk's JWT Auth and there are many excellent sources online where you can learn more about JWTs so we won't deep-dive into that topic, but to whet your appetite, here are some of the benefits from using JWT with Tyk: + +- **Stateless Authentication**: Eliminates the need for server-side session storage, improving scalability. +- **Flexible Integration**: Works with multiple identity providers including Auth0, Okta, and custom JWT issuers. +- **Enhanced Security**: Supports multiple signature validation methods (RSA, ECDSA, HMAC) and claim verification. +- **Granular Access Control**: Leverage JWT claims for policy enforcement and scope-based permissions. +- **Performance Optimized**: Efficient token validation with minimal overhead and support for JWKS caching. + +## Quick Start: Securing APIs with Auth0 or Keycloak + +In this tutorial, we'll secure a Tyk OAS API using JWT authentication with either Auth0 or Keycloak as the identity provider. + + + +If you want to try out JWT Auth without linking up to a third-party IdP then you can skip step 1 and provide the base64 encoded public key for your JWT (in the `source` field rather than configuring `jwksURIs`) in step 3. You'll need to generate a JWT for the request, but otherwise everything stays the same. + +Now back to the tutorial... + + + +We'll start by configuring the identity provider, then set up JWT validation in Tyk, create a security policy, configure the API to use the policy, and finally test the secured API with a valid token. 
+ +### Prerequisites + +- A Tyk installation (Cloud or Self-Managed) with Tyk Dashboard license +- An Auth0 account or Keycloak installation +- An existing Tyk OAS API (see [this tutorial](/api-management/gateway-config-managing-oas#using-tyk-dashboard-api-designer-to-create-an-api)) +- Postman, cURL, or another API testing tool + +### Step-by-Step Guide + +1. **Configure Your Identity Provider to obtain your JWKS URI** + + The first step is to configure your Identity Provider (IdP) to issue JWTs and provide a JWKS URI that Tyk can use to validate the tokens. Below are instructions for both Auth0 and Keycloak. + + + + + 1. Log in to your Auth0 dashboard + 2. Navigate to Applications > APIs and click Create API + 3. Enter a name and identifier (audience) for your API + 4. Note your Auth0 domain (e.g. `your-tenant.auth0.com`) + 5. Your JWKS URI will be: `https://your-tenant.auth0.com/.well-known/jwks.json` + + + + + + 1. Log in to your Keycloak admin console + 2. Create or select a realm (e.g. `tyk-demo`) + 3. Navigate to Clients and create a new client with: + - Client ID: `tyk-api-client` + - Client Protocol: `openid-connect` + - Access Type: `confidential` + 4. After saving, go to the Installation tab and select "OIDC JSON" format + 5. Your JWKS URI will be: `http://your-keycloak-host/realms/tyk-demo/protocol/openid-connect/certs` + + + + + +2. **Create a Security Policy** + + 1. In the Tyk Dashboard, navigate to **Policies** + 2. Click **Add Policy** + 3. Configure the policy: + - Name: `JWT Auth Policy` + - APIs: Select your Tyk OAS API + - Access Rights: Configure appropriate paths and methods + - Authentication: Select JWT + - JWT Scope Claim Name: Enter the JWT claim that contains scopes (e.g. `scope` or `permissions`) + - Required Scopes: Add any required scopes for access (optional) + 4. Click Create to save your policy + +3. **Configure JWT Authentication in Tyk OAS API** + + 1. Navigate to APIs and select your API + 2. Click **Edit** + 3. 
Enable **Authentication** in the **Server** section, select **JSON Web Token (JWT)** as the authentication method + 4. Configure the JWT settings: + - Token Signing Method: Select `RSA Public Key` + - Subject identity claim: Set to `sub` + - JWKS Endpoint: Enter your JWKS URI for your IdP obtained in step 1 + - Policy claim: Set to `pol` + - Default policy: Select `JWT Auth Policy` (the policy you created previously) + - Clock Skew (optional): Set to accommodate time differences (e.g. `10`) + - Authentication Token Location: `header` + - Header Name: `Authorization` + - Strip Authorization Data: `Enabled` + 5. Click **Save API** + +4. **Test your API** + + 1. Obtain a JWT from your IdP + 2. Make a request to your API providing the JWT as a Bearer token in the `Authorization` header; Tyk will validate the JWT using the JWKS that it retrieves from your JWKS URI + 3. Observe that the request is successful + + ```bash + curl -X GET {API URL} -H "Accept: application/json" -H "Authorization: Bearer {token}" + ``` + +## How JWT Authentication works with Tyk + +This diagram outlines the flow when using JWT Auth to secure access to your API. + +JSON Web Tokens Flow + +1. Alice (the *user* or *resource owner*) authenticates with the Identity Provider (IdP) and consents to delegate specific permissions to a client application (steps 1 and 2). + +2. The client application receives an authorization code, which it then exchanges for an access token (step 3). This is a bearer token, meaning that the client can present it to access protected resources on behalf of the user (resource owner / Alice). + +3. When the client sends a request to the API gateway , it includes the access token (JWT) in the request - usually in the Authorization header as a Bearer token (step 4). + +4. 
Tyk validates the token's signature, using the public key(s) of the trusted issuer (IdP): + - locate the JWT in the request (header, cookie or query parameter) + - decode the JWT + - extract the `kid` (Key ID) from the token header + - fetch the public keys from all configured JWKS URIs (or use locally declared static public key) + - merge all the retrieved public keys into a single list + - search this list for a public key matching the extracted `kid` + - if no match is found, the validation fails, and the request is rejected + - if a matching key is found, the JWT signature is validated using the parameters in the JWK + - if signature validation fails, the request is rejected + - if the token is valid and not expired, the request is authenticated as coming from the client, and is accepted + +5. Next, Tyk will create an internal session for the request which will be used to control access rights, consumption limits and to identify the request in tracking logs (step 5). The session is linked to Alice using an identity that is [extracted from the JWT claims](/api-management/authentication/jwt-authorization#identifying-the-session-owner). + +6. 
In step 6 Tyk will proceed to enforce authorization by checking other claims to determine which Security Policies should be applied to the session: + - check for the value in the policy claim within the JWT (identified by the value stored in `basePolicyClaims`) + - use this to identify the Tyk Security Policy (or policies) to be applied to the request + - if there is no direct policy mapping, then the `defaultPolicy` will be used + - apply the identified policies to the session, configuring access rights, rate limits and usage quota + + +## Configuring your API to use JWT authentication + +The OpenAPI Specification treats JWT authentication as a variant of [bearer authentication](https://swagger.io/docs/specification/v3_0/authentication/bearer-authentication/) in the `components.securitySchemes` object using the `type: http`, `scheme: bearer` and `bearerFormat: jwt`: + +```yaml +components: + securitySchemes: + myAuthScheme: + type: http + scheme: bearer + bearerFormat: jwt + +security: + - myAuthScheme: [] +``` + +With this configuration provided by the OpenAPI description, in the Tyk Vendor Extension we need to enable authentication, to select this security scheme and to indicate where Tyk should look for the credentials. Usually the credentials will be provided in the `Authorization` header, but Tyk is configurable, via the Tyk Vendor Extension, to support custom header keys and credential passing via query parameter or cookie. + +```yaml +x-tyk-api-gateway: + server: + authentication: + enabled: true + securitySchemes: + myAuthScheme: + enabled: true + header: + enabled: true + name: Authorization +``` + +**Note:** URL query parameter keys and cookie names are case sensitive, whereas header names are case insensitive.
+ +You can optionally [strip the user credentials](/api-management/client-authentication#managing-authorization-data) from the request prior to proxying to the upstream using the `authentication.stripAuthorizationData` field (Tyk Classic: `strip_auth_data`). + +With the JWT method selected, you'll need to configure Tyk to handle the specific configuration of JSON Web Tokens that clients will be providing. All of the JWT specific configuration is performed within the `authentication.jwt` object in the [Tyk Vendor Extension](/api-management/gateway-config-tyk-oas#jwt). + +### Locating the JWT in the Request + +The OpenAPI Specification provides a `securitySchemes` mechanism that lets you define where the JWT should be located, for example in the request header. However, in practice, different clients may supply the token in different locations, such as a query parameter. + +While OAS does not support this natively, the Tyk Vendor Extension does this by allowing configuration of alternative locations in the JWT entry in `server.authentication.securitySchemes`. Building on the previous example, we can add optional query and cookie locations as follows: + +```yaml +x-tyk-api-gateway: + server: + authentication: + enabled: true + securitySchemes: + myAuthScheme: + enabled: true + header: + enabled: true + name: Authorization + query: + enabled: true + name: query-auth + cookie: + enabled: true + name: cookie-auth +``` + +### Using Tyk Classic APIs + +As noted in the Tyk Classic API [documentation](/api-management/gateway-config-tyk-classic#configuring-authentication-for-tyk-classic-apis), you can select JSON Web Token authentication using the `use_jwt` option. Tyk Classic APIs do not natively support multiple JWKS endpoints, though a [custom authentication plugin](/api-management/plugins/plugin-types#authentication-plugins) could be used to implement this functionality. 
+ +## Signature Validation + +| Method | Cryptographic Style | Secret Type | Supported Locations for Secret | Supported Algorithms | +| :--------- | :------------------- | :------------- | :------------------------------ | :---------------------------------------------------- | +| **HMAC** | Symmetric | Shared secret | API definition | `HS256`, `HS384`, `HS512` | +| **RSA** | Asymmetric | Public key | API definition, JWKS endpoint | `RS256`, `RS384`, `RS512`, `PS256`, `PS384`, `PS512` | +| **ECDSA** | Asymmetric | Public key | API definition, JWKS endpoint | `ES256`, `ES384`, `ES512` | + +### Secret Management + +You must provide Tyk with the secret or key to be used to validate the incoming JWTs. + +- For the asymmetric methods (RSA and ECDSA) the public key can be stored in the API definition or Tyk can retrieve from a public JSON Web Key Sets (JWKS) endpoint (supporting dynamic rotation of keys in the JWKS) +- For symmetric encryption (HMAC), the secret is shared between the client and Tyk and so is stored within the API definition not on the public JWKS server + +#### Locally Stored Keys and Secrets + +When storing the key or secret in the API definition, it is first base64 encoded and then configured in `server.authentication.securitySchemes..source` (in Tyk Classic, this is `jwt_source`). For improved separation of concerns and flexibility, the key/secret can be placed in an [external key value store](/tyk-configuration-reference/kv-store), with the appropriate reference configured in the API definition. + +For example, this fragment will configure the JWT authentication middleware to use the secret located at `consul://secrets/jwt-secret` to validate the signature of incoming JWTs. 
Note that the external KV store reference has been base64 encoded and then stored in `source`: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + source: Y29uc3VsOi8vc2VjcmV0cy9qd3Qtc2VjcmV0 +``` + +#### Remotely Stored Keys (JWKS endpoint) + +##### Single JWKS endpoint + +Prior to Tyk 5.9.0 and when using Tyk Classic APIs, Tyk can only retrieve a single JSON Web Key Set from a JWKS endpoint configured in `server.authentication.securitySchemes.<schemeName>.source` (in Tyk Classic, this is `jwt_source`). This field accepts the base64 encoded full URI (including protocol) of the JWKS endpoint. + +For example, the following Tyk OAS fragment will configure the JWT authentication middleware to retrieve the JWKS from `https://your-tenant.auth0.com/.well-known/jwks.json` when validating the signature of incoming JWTs. Note that the JWKS endpoint has been base64 encoded and then stored in `source`: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + source: aHR0cHM6Ly95b3VyLXRlbmFudC5hdXRoMC5jb20vLndlbGwta25vd24vandrcy5qc29u +``` + +##### Multiple JWKS endpoints + +From **Tyk 5.9.0** onwards, Tyk OAS APIs can validate against multiple JWKS endpoints, allowing you to use different IdPs to issue JWTs for the same API. Multiple JWKS endpoints can be configured in the `jwksURIs` array. Note that these URIs are not base64 encoded in the API definition and so are human-readable. Tyk will retrieve the JSON Web Key Sets from each of these endpoints and these will be used to attempt validation of the received JWT.
+ +For example, the following fragment will configure the JWT authentication middleware to retrieve the JWKS from both Auth0 and Keycloak when validating the signature of incoming JWTs: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + jwksURIs: + - url: https://your-tenant.auth0.com/.well-known/jwks.json + - url: http://your-keycloak-host/realms/tyk-demo/protocol/openid-connect/certs +``` + +*Multiple JWKS endpoints and the `jwksURIs` array are not supported by Tyk Classic APIs.*

+ + + +If both `.source` and `.jwksURIs` are configured, the latter will take precedence. + + + +#### JWKS caching + +Tyk caches the JSON Web Key Set (JWKS) retrieved from JWKS endpoints to reduce the performance impact of contacting external services during request handling. A separate cache is maintained for each JWKS endpoint for each API, with a default validity period of *240 seconds*, after which the cache will be refreshed when a new request is received. + +From **Tyk 5.10.0** onwards, we have introduced enhanced JWKS caching for Tyk OAS APIs with the following improvements: + +- **Configurable cache timeout** - Set custom validity periods per JWKS endpoint +- **Pre-fetch functionality** - Automatically retrieve and cache all JWKS when the API loads to the Gateway, ensuring the first request doesn't experience the latency of fetching keys from external endpoints +- **Cache management API** - New endpoints to manually invalidate JWKS caches + +For example, the following fragment will configure the JWT authentication middleware to retrieve the JWKS from both Auth0 and Keycloak when validating the signature of incoming JWTs, assigning a 300 second validity period to the Auth0 JWKS and 180 second validity period for Keycloak: + +```yaml +x-tyk-api-gateway: + server: + authentication: + securitySchemes: + jwtAuth: + jwksURIs: + - url: https://your-tenant.auth0.com/.well-known/jwks.json + cacheTimeout: "300s" # 5 minutes + - url: http://your-keycloak-host/realms/tyk-demo/protocol/openid-connect/certs + cacheTimeout: "3m" # 3 minutes (alternative format) +``` +##### Configuration Options + +| Field | Type | Description | Default | Supported Formats | +| :------- | :------ | :------------- | :--------- | :------------------- | +| `url` | string | JWKS endpoint URL | Required | Full URI including protocol | +| `cacheTimeout` | string | Cache validity period | 240s | `"300s"`, `"5m"`, `"1h"`, etc. 
| + + + + +Tyk Classic APIs continue to use the existing JWKS caching behavior with the 240-second default timeout. The enhanced caching features are available only for Tyk OAS APIs. + + + +##### JWKS Cache Management + +New Gateway API endpoints are available from **Tyk 5.10.0** to manage JWKS caches programmatically. These endpoints work for both Tyk OAS and Tyk Classic APIs: + +| Endpoint | Method | Description | +| :---------- | :--------- | :------------- | +| `/tyk/cache/jwks` | `DELETE` | Invalidate JWKS caches for all APIs | +| `/tyk/cache/jwks/{apiID}` | `DELETE` | Invalidate JWKS cache for a specific API | + +**Note:** These endpoints are currently available only through the Tyk Gateway API and are not yet extended to the Tyk Dashboard API. + +**Example usage:** +```bash +# Flush all JWKS caches +curl -X DELETE http://your-gateway:8080/tyk/cache/jwks \ + -H "x-tyk-authorization: your-secret" + +# Flush JWKS cache for specific API +curl -X DELETE http://your-gateway:8080/tyk/cache/jwks/your-api-id \ + -H "x-tyk-authorization: your-secret" +``` + +##### Feature Compatibility Summary + +| Feature | Tyk Classic | Tyk OAS | Available From | +| :--------- | :------------- | :--------- | :---------------- | +| Single JWKS endpoint | βœ… | βœ… | All versions | +| Multiple JWKS endpoints | ❌ | βœ… | Tyk 5.9.0+ | +| Configurable cache timeout | ❌ | βœ… | Tyk 5.10.0+ | +| Pre-fetch functionality | ❌ | βœ… | Tyk 5.10.0+ | +| Cache management API | βœ… | βœ… | Tyk 5.10.0+ | + +## Split Token Flow + +Split Token Flow addresses a fundamental security concern with JWT tokens: when a JWT is stored on a client device (browser, mobile app, etc.), all of its contents can be easily decoded since JWTs are only base64-encoded, not encrypted. This means sensitive information in the payload is potentially exposed. 
+ +The JWT consists of three parts: + +Split Token Example + +In the above example you can see that they are: + +- Header: `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9` +- Payload: `eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJlbWFpbCI6ImhlbGxvQHdvcmxkLmNvbSJ9` +- Signature: `EwIaRgq4go4R2M2z7AADywZ2ToxG4gDMoG4SQ1X3GJ0` + +The Split Token approach provides a solution by: + +1. Separating the JWT into its three component parts: header, payload, and signature +2. Storing only the signature on the client side (which by itself is meaningless) +3. Keeping the header and payload securely on the server side (in Tyk) +4. Reconstructing the complete JWT when needed for authentication + +This approach combines the benefits of JWTs (rich claims, stateless validation) with the security of opaque tokens (no information disclosure). + +### When to Use Split Token Flow + +Consider using Split Token Flow when: + +- Your JWT payload contains sensitive information that shouldn't be exposed to clients +- You want to prevent token inspection by malicious actors +- You need the flexibility of JWT while maintaining higher security +- You're implementing systems that must meet strict security compliance requirements + +### How Split Token Flow Works + +Here's how the process works with Tyk Gateway: + +1. Token Issuance: + + - A `/token` endpoint is configured on Tyk from which the client should request the access token + - Tyk requests an access token from an authorization server (e.g., Keycloak) on behalf of the client + - The authorization server returns a complete JWT + - Tyk intercepts this response through a Virtual Endpoint + - Tyk splits the JWT into its components and stores the header and payload in its Redis database + - Only the signature portion is returned to the client as an "opaque" token + +2. 
Token Usage: + + - The client makes API requests using only the signature as their access token + - Tyk receives the request and looks up the stored header and payload using the signature + - Tyk reconstructs the complete JWT and validates it + - If valid, Tyk forwards the request to the upstream API with the full JWT + +3. Security Benefits: + + - The client never possesses the complete JWT, only a meaningless signature + - Token contents cannot be inspected by client-side code or malicious actors + - Token validation still occurs using standard JWT verification + + +### Implementing Split Token Flow + +1. **Create a Virtual Endpoint for Token Issuance** + + First, create a virtual endpoint in Tyk that will: + + - Receive authentication requests from clients + - Forward these requests to your authorization server + - Split the returned JWT + - Store the header and payload in Tyk's storage + - Return only the signature to the client + - Here's a simplified implementation: + + ```javascript + function splitTokenHandler(request, session, config) { + // 1. Forward the client's credentials to the authorization server + var authServerResponse = forwardToAuthServer(request); + + if (authServerResponse.Code !== 200) { + return TykJsResponse({ + Body: authServerResponse.Body, + Code: authServerResponse.Code + }, session.meta_data); + } + + // 2. Extract the JWT from the response + var responseBody = JSON.parse(authServerResponse.Body); + var fullJWT = responseBody.access_token; + + // 3. Split the JWT into its components + var jwtParts = fullJWT.split("."); + var header = jwtParts[0]; + var payload = jwtParts[1]; + var signature = jwtParts[2]; + + // 4. Store the complete JWT in Tyk's Redis database using the signature as the key + // This function would use Tyk's storage API to save the data + storeJWTComponents(signature, header, payload, fullJWT); + + // 5. 
Modify the response to return only the signature + responseBody.access_token = signature; + + return TykJsResponse({ + Body: JSON.stringify(responseBody), + Code: 200 + }, session.meta_data); + } + ``` + + Note that this example includes some level of abstraction for clarity and so is not a full implementation. + +2. **Configure Custom Pre-Auth Plugin** + + Next, create a custom pre-auth plugin that reconstructs the JWT before it reaches the standard Tyk JWT Auth middleware: + + ```javascript + function reconstructJWT(request, session, config) { + // 1. Extract the signature from the Authorization header + var authHeader = request.Headers["Authorization"]; + var signature = authHeader.replace("Bearer ", ""); + + // 2. Retrieve the stored JWT components using the signature + var storedJWT = retrieveJWTComponents(signature); + + if (!storedJWT) { + return TykJsResponse({ + Body: "Invalid token", + Code: 401 + }, session.meta_data); + } + + // 3. Replace the Authorization header with the full JWT + request.SetHeaders["Authorization"] = "Bearer " + storedJWT.fullJWT; + + return request; + } + ``` + +3. 
**Test the Implementation** + + To test your Split Token Flow: + + Request a token from your Tyk virtual endpoint: + + ```bash + curl -X POST https://your-tyk-gateway/token \ + -d "grant_type=client_credentials&client_id=your-client-id&client_secret=your-client-secret" + ``` + + You'll receive a response with only the signature as the access token, for example: + + ```json + { + "access_token": "EwIaRgq4go4R2M2z7AADywZ2ToxG4gDMoG4SQ1X3GJ0", + "token_type": "bearer", + "expires_in": 3600 + } + ``` + + Use this token to access your JWT Auth protected API where you have configured the custom pre-auth plugin and JWT Auth: + + ```bash + curl https://your-tyk-gateway/protected-api \ + -H "Authorization: Bearer EwIaRgq4go4R2M2z7AADywZ2ToxG4gDMoG4SQ1X3GJ0" + ``` + + diff --git a/basic-config-and-security/security/authentication-authorization/multiple-auth.mdx b/basic-config-and-security/security/authentication-authorization/multiple-auth.mdx new file mode 100644 index 000000000..a58f1518b --- /dev/null +++ b/basic-config-and-security/security/authentication-authorization/multiple-auth.mdx @@ -0,0 +1,539 @@ +--- +title: "Combine Authentication Methods" +description: "How to combine multiple authentication methods in Tyk to enhance security and flexibility." 
+keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Multi Authentication, Chained Authentication" +sidebarTitle: "Multi Auth" +--- + +## Introduction + +Tyk provides flexible multi-authentication capabilities, allowing you to combine various [authentication methods](/api-management/client-authentication#what-does-tyk-support) using different logical approaches: + +- AND logic: All configured authentication methods must succeed before granting access +- OR logic: Any one of the configured authentication methods can grant access _(Tyk OAS APIs only)_ + +This enables scenarios such as requiring both Bearer Token authentication and Basic Auth simultaneously, or allowing access via either JWT validation or API key authentication. + +```mermaid +graph LR + Client([Client]) -->|Request| Gateway[Tyk Gateway] + + subgraph "Multiple Authentication" + Gateway --> Auth1[Authentication Method 1
e.g., API Key] + Auth1 --> Auth2[Authentication Method 2
e.g., Basic Auth] + Auth2 --> Auth3[Authentication Method N
e.g., JWT] + Auth3 --> SessionCreation[Create Session
& Apply Policies] + SessionCreation --> AccessDecision{Access
Decision} + end + + AccessDecision -->|Granted| Upstream[(Upstream
API)] + AccessDecision -->|Denied| Reject[Reject Request] + + %% Styling + classDef client fill:#f9f9f9,stroke:#333,stroke-width:2px + classDef gateway fill:#d4edda,stroke:#28a745,stroke-width:2px + classDef auth fill:#e2f0fb,stroke:#0275d8,stroke-width:1px + classDef session fill:#d1ecf1,stroke:#17a2b8,stroke-width:1px + classDef decision fill:#cce5ff,stroke:#0275d8,stroke-width:2px + classDef upstream fill:#f8f9fa,stroke:#6c757d,stroke-width:2px + classDef reject fill:#f8d7da,stroke:#dc3545,stroke-width:2px + + class Client client + class Gateway gateway + class Auth1,Auth2,Auth3 auth + class SessionCreation session + class AccessDecision decision + class Upstream upstream + class Reject reject +``` + +## Use Cases + +- **Multi-tenant APIs**: Different tenants using different identity providers +- **Migration scenarios**: Supporting both legacy and modern auth during transitions +- **Partner integrations**: External partners use mTLS while internal users use JWT +- **Mobile + Web**: Different auth methods for different client types + +{/* ## Quick Start + + + +In this quick-start guide, we will configure a Tyk OAS API with multiple authentication methods. We will demonstrate how to set up an API that supports both Basic Auth and API Key authentication, allowing clients to authenticate using either method. + +### Prerequisites + +- **Working Tyk Environment:** You need access to a running Tyk instance that includes both the Tyk Gateway and Tyk Dashboard components. For setup instructions using Docker, please refer to the [Tyk Quick Start](https://github.com/TykTechnologies/tyk-pro-docker-demo?tab=readme-ov-file#quick-start). +- **Curl**: These tools will be used for testing. + +### Instructions + +#### Create an API + +#### Configuration + +#### Testing */} + +## Understanding Authentication Modes + +```mermaid +graph LR + Client([Client]) -->|Request| Gateway[Tyk Gateway] + + subgraph "Tyk Multiple Authentication" + Gateway --> AuthConfig{Authentication
Configuration} + + %% Legacy Mode + AuthConfig -->|Legacy Mode| LegacyAuth["AND Logic
(All must pass)"] + LegacyAuth --> Auth1[Auth Method 1] + Auth1 --> Auth2[Auth Method 2] + Auth2 --> SessionCreation["Create Session
(from BaseIdentityProvider)"] + + %% Compliant Mode + AuthConfig -->|Compliant Mode| CompliantAuth["OR Logic
(Any group can pass)"] + CompliantAuth --> Group1["Group 1
(AND Logic)"] + CompliantAuth --> Group2["Group 2
(AND Logic)"] + + Group1 --> G1Auth1["Auth Method A"] + G1Auth1 --> G1Auth2["Auth Method B"] + + Group2 --> G2Auth1["Auth Method C"] + G2Auth1 --> G2Auth2["Auth Method D"] + + G1Auth2 --> DynamicSession1["Create Session
(from last auth in group)"] + G2Auth2 --> DynamicSession2["Create Session
(from last auth in group)"] + + SessionCreation --> AccessDecision + DynamicSession1 --> AccessDecision + DynamicSession2 --> AccessDecision + + AccessDecision{Access
Decision} + end + + AccessDecision -->|Granted| Upstream[(Upstream
API)] + AccessDecision -->|Denied| Reject[Reject Request] + + %% Styling + classDef client fill:#f9f9f9,stroke:#333,stroke-width:2px + classDef gateway fill:#d4edda,stroke:#28a745,stroke-width:2px + classDef auth fill:#e2f0fb,stroke:#0275d8,stroke-width:1px + classDef group fill:#fff3cd,stroke:#ffc107,stroke-width:1px + classDef session fill:#d1ecf1,stroke:#17a2b8,stroke-width:1px + classDef decision fill:#cce5ff,stroke:#0275d8,stroke-width:2px + classDef upstream fill:#f8f9fa,stroke:#6c757d,stroke-width:2px + classDef reject fill:#f8d7da,stroke:#dc3545,stroke-width:2px + + class Client client + class Gateway gateway + class Auth1,Auth2,G1Auth1,G1Auth2,G2Auth1,G2Auth2,LegacyAuth,CompliantAuth auth + class Group1,Group2 group + class SessionCreation,DynamicSession1,DynamicSession2 session + class AuthConfig,AccessDecision decision + class Upstream upstream + class Reject reject +``` + +Tyk OAS offers two modes for configuring multiple authentication methods: + +- **Legacy Mode** (Default): Maintains backward compatibility with existing Tyk implementations using AND logic only +- **Compliant Mode**: Introduced in 5.10.0, provides enhanced flexibility by supporting both AND and OR logic between authentication methods + + + + + Tyk Classic APIs and pre-5.10 Tyk OAS APIs only support the legacy mode. + + + +### Legacy Mode + +The Legacy mode is the traditional implementation of multi-auth, supported by Tyk Classic APIs and Tyk OAS APIs prior to Tyk 5.10. + +In this mode, all configured authentication methods must be satisfied in the request (i.e., they are combined using AND logic). + +**How does the operation differ between Tyk Classic and Tyk OAS APIs?** + +- **Tyk Classic API**: All configured authentication methods must be satisfied in the request + +- **Tyk OAS API**: Only the **first** security requirement object in the OpenAPI description's `security` array is processed. 
All the authentication methods in the first object must be satisfied in the request, together with any proprietary auth methods `enabled` in the Tyk Vendor Extension. + + ``` + security: + - api_key: [] # this security requirement is processed: both methods must be satisfied + basic_auth: [] + - jwt_auth: [] # Ignored in Legacy mode + ``` + +#### Session Object Handling + +In Legacy mode, the `baseIdentityProvider` setting determines which authentication method provides the [session object](/api-management/policies#what-is-a-session-object). This setting must be configured to one of the auth methods in the logical rule using the following mapping: + +- `auth_token` - for token-based authentication +- `jwt_claim` - for JWT authentication +- `basic_auth_user` - for Basic Authentication +- `hmac_key` - for HMAC authentication +- `custom_auth` - for custom authentication plugin + + +### Compliant Mode + +The Compliant mode is named as such because Tyk complies with the security requirements declared in the OpenAPI description, combining different authentication methods using AND and OR logic as required. + +#### OpenAPI Security Requirements + +In OpenAPI, security is defined using **Security Requirement Objects** in the `security` section: + +``` +security: + - api_key: [] + basic_auth: [] + - jwt_auth: [] + oauth2: [] +``` + +- Each security requirement object in the OAS `security` array represents an **alternative**; these are evaluated with **OR** logic. +- Within a single security requirement object, multiple security schemes can be declared and will be combined using **AND** logic (i.e., all listed schemes must succeed together). +- A request is authorized if **any one** of the defined security requirement objects is successfully validated. 
+- The session object is determined dynamically based on which security requirement is satisfied +- This structure enables **multi-auth configurations**, supporting both **combined (AND)** and **alternative (OR)** authentication methods. + +#### How OR Logic Works + +When using Compliant mode with multiple security requirements: + +- Tyk attempts each authentication method in sequence +- If any method succeeds, the request is authorized +- If all methods fail, the request is rejected with the error from the last attempted method + +#### How AND Logic Works + +Within a single security requirement object that contains multiple schemes: + +- All schemes within that security requirement must be satisfied (AND logic) +- The request is only authorized if all schemes are valid +- If any scheme fails, the entire security requirement fails, and Tyk moves to the next one +- This allows for combining different authentication methods that must all be present + +#### Examples + +Here's an example `security` object from an OpenAPI description with both AND and OR logic: + +```json +{ + "security": [ + { + "scheme1": [] + }, + { + "scheme2": [], + "scheme3": [] + } + ] +} +``` +You will notice that the `security` object should contain references to security schemes that you've defined in the `components.securitySchemes` section of the OpenAPI description. +In this example, the request will be authorized if either: +- `scheme1` is provided (first security requirement) +- OR +- Both `scheme2` AND `scheme3` are provided (second security requirement with AND logic) + +#### Session Object Handling + +The [session object](/api-management/policies#what-is-a-session-object) (determining rate limits, quotas, and access rights) comes from the successful authentication method. This allows different auth methods to have different associated policies and permissions. + +When using **Compliant** mode, the session object handling is more dynamic: + +1. 
Between different security requirement objects (OR logic): The first security requirement that successfully authenticates will provide the session object. + +2. Within a single security requirement object (AND logic): When multiple authentication methods are specified in the same requirement object (as in the example below), all methods must pass, and the **last** successfully processed authentication method will provide the session object. + +Auth methods are always validated in the following order (and skipped if not included in the security requirement): + +1. [Tyk OAuth 2.0](/api-management/authentication/oauth-2) +2. External OAuth ([deprecated](/api-management/client-authentication#integrate-with-external-authorization-server-deprecated)) +3. [Basic Auth](/api-management/authentication/basic-authentication) +4. [HMAC](/basic-config-and-security/security/authentication-authorization/hmac-signatures) +5. [JWT](/basic-config-and-security/security/authentication-authorization/json-web-tokens) +6. OpenID Connect ([deprecated](/api-management/client-authentication#integrate-with-openid-connect-deprecated)) +7. [Custom Plugin Auth](/api-management/authentication/custom-auth) +8. 
[Bearer / Auth Token](/api-management/authentication/bearer-token) (API Key) + +For example, if this security requirement is satisfied in the request, the session metadata will come from the Auth Token, despite it being declared first in the security requirement: + +``` +security: + - api_key: [] + basic_auth: [] +``` + + + + + +### Choosing the Right Mode + +**Use Legacy Mode when:** + +- Migrating from or using Tyk Classic APIs +- You need the session metadata to be taken from an auth method earlier in the middleware processing order + +**Use Compliant Mode when:** + +- You need alternative auth methods (OR logic) +- Supporting multiple client types or identity providers +- For APIs that need to serve diverse client bases with different security requirements +- Building new APIs with flexible authentication requirements + +## Configuration Options + +### Security Processing Mode + +The `securityProcessingMode` option in the Tyk Vendor Extension allows you to specify which mode to use when processing the `security` configuration in your API. This controls how Tyk will interpret the authentication settings in the OpenAPI description and the Vendor Extension. 
+ +```yaml +x-tyk-api-gateway: + server: + authentication: + securityProcessingMode: compliant // or legacy +``` + +### Basic Example: API with Multiple Auth Methods + +Here's a simple example of an OpenAPI description that declares JWT and API Key security schemes: + +```yaml +# Example: API supporting either JWT OR API Key authentication +components: + securitySchemes: + api_key: + type: apiKey + name: X-API-Key + in: header + description: "API key for service-to-service authentication" + jwt_auth: + type: http + scheme: bearer + bearerFormat: JWT + description: "JWT token for user authentication" +security: + - api_key: [] # Option 1: API key only + - jwt_auth: [] # Option 2: JWT only +``` + +- If the `securityProcessingMode` in the Tyk Vendor Extension is set to `compliant`, Tyk will check incoming requests against each `security` option in turn, authenticating requests using Option 1 or Option 2. +- If the `securityProcessingMode` is set to `legacy` (or is omitted), Tyk will check requests only against the first `security` option (Option 1). + + +### Configuring Multiple Auth Methods in the API Designer + +You can configure chained authentication by following these steps: + +1. Enable **Authentication** in the **Servers** section + +2. Select the **Multiple Authentication Mechanisms** option from the drop-down list. + +3. 
Select the **Authentication Mode** that you wish to use: [Compliant](/basic-config-and-security/security/authentication-authorization/multiple-auth#compliant-mode) or [Legacy](/basic-config-and-security/security/authentication-authorization/multiple-auth#legacy-mode) + + + + + Select **Compliant mode** for full interpretation of the security requirements declared in the OpenAPI description, allowing for fully flexible authentication of your API clients: + + Select Compliant mode + + Use the API Editor view to configure the different security schemes and requirements to satisfy the client authentication needs of your API: + + Configure Compliant mode + + + + + Use Legacy mode for simple scenarios where you can select the **Authentication methods** that the client must satisfy in the request. + + You must identify the **Base identity provider** that will provide the session metadata: + + Select Legacy mode + + You can now configure each authentication method in the usual manner using the options in the API designer. + + Configure the Auth Methods for Legacy mode + + + + + + +## Advanced Configuration + +### Using Proprietary Auth Methods + +Compliant mode allows you to combine standard OpenAPI security schemes with Tyk's proprietary authentication methods by extending the OpenAPI `security` section into the Tyk Vendor Extension: + +```yaml +components: + securitySchemes: + api_key: + type: apiKey + name: Authorization + in: header + jwt_auth: + type: http + scheme: bearer + bearerFormat: JWT +security: + - jwt_auth: [] +x-tyk-api-gateway: + server: + authentication: + securityProcessingMode: compliant + security: + - - hmac + - api_key + - - custom_auth + securitySchemes: + hmac: + enabled: true + custom_auth: + enabled: true + config: + authType: coprocess +``` + +The extended security requirements in the vendor extension (`x-tyk-api-gateway.server.authentication.security`) are concatenated onto the requirements declared in the OpenAPI description. 
This configuration allows three authentication methods: JWT, API Key with HMAC, and Custom Auth. + +## Migration Considerations + +### Moving from Legacy to Compliant Mode + + + +If you change the security processing mode for an existing API from the Dashboard's API Designer, Tyk will add the `securityProcessingMode` field to your Vendor Extension, but will not make any other changes to the API's configuration. You may need to make adjustments to the OpenAPI description or Vendor Extension to ensure that the authentication rules are set correctly. + + + +When migrating from Legacy to Compliant mode: + +- Review your API's authentication configuration +- Ensure all required security schemes are properly defined, for example any use of Tyk proprietary auth methods (HMAC, custom authentication) will need to be reflected with creation of new security requirements within the Vendor Extension's `security` section and removal of requirements from the OpenAPI description's `security` section +- Test thoroughly, as authentication behavior will change +- Be aware that the session object may come from different sources depending on which auth method succeeds + +### Backward Compatibility + +*Legacy* mode ensures backward compatibility with existing Tyk implementations. If you're unsure which mode to use, start with *Legacy* mode and migrate to *Compliant* mode when ready. + +## Troubleshooting + + + +**Problem**: API returns 401 errors even with valid credentials. + +**Possible Causes & Solutions**: + +1. Security schemes not properly defined + + ```yaml + # ❌ Incorrect - missing security scheme definition + security: + - api_key: [] + # No corresponding securitySchemes definition + + # βœ… Correct - complete definition + components: + securitySchemes: + api_key: + type: apiKey + name: Authorization + in: header + security: + - api_key: [] + ``` + +2. 
Security schemes not enabled in Tyk extension + + ```yaml + x-tyk-api-gateway: + server: + authentication: + securityProcessingMode: compliant + securitySchemes: + api_key: + enabled: true # ← Must be explicitly enabled + ``` + +3. Mixed Legacy/Compliant configuration + + - Ensure you're not mixing `baseIdentityProvider` (Legacy) with complex security arrays (Compliant) + - Check that `securityProcessingMode` matches your intended configuration + + + +**Problem**: Requests are authenticated but get unexpected rate limits or access denials. + +**Root Cause**: In Compliant mode, the session object comes from whichever authentication method succeeds first. + +**Solutions**: + +1. Review security requirement order - Place most restrictive auth methods first: + + ```yaml + security: + - premium_jwt: [] # Premium users (higher limits) + - basic_api_key: [] # Basic users (lower limits) + ``` + +2. Ensure consistent policies across auth methods: + + - Verify that API keys and JWT tokens for the same user have similar access rights + - Check that rate limits align with your business logic + +3. Debug session source: + + ```bash + # Enable debug logging to see which auth method succeeded + "log_level": "debug" + ``` + + + +**Problem**: Slower response times with multiple authentication methods. + +**Expected Behavior**: Some performance impact is normal due to additional processing. + +**Optimization**: + +1. Order security requirements by likelihood: + + ```yaml + security: + - most_common_auth: [] # Try most common first + - fallback_auth: [] # Fallback for edge cases + ``` + +2. 
Monitor authentication attempts: + + ```bash + # Look for "OR wrapper" log entries showing auth attempts + grep "OR wrapper" /var/log/tyk/tyk.log + ``` + + + +Enable detailed logging in your Tyk Gateway to see which authentication methods are being attempted and which one succeeds: + +```json +{ + "global": { + "log_level": "debug" + } +} +``` + +Look for these log entries: +- `Processing multiple security requirements (OR conditions)` + - Confirms Compliant mode is active +- `OR wrapper` entries + - In Compliant mode, this shows which auth methods are being tried +- `BaseIdentityProvider set to` + - In Legacy mode, this shows which auth method succeeded + + diff --git a/basic-config-and-security/security/authentication-authorization/open-keyless.mdx b/basic-config-and-security/security/authentication-authorization/open-keyless.mdx new file mode 100644 index 000000000..8adc9132e --- /dev/null +++ b/basic-config-and-security/security/authentication-authorization/open-keyless.mdx @@ -0,0 +1,14 @@ +--- +title: "Open (No Authentication)" +description: "How to configure open or keyless authentication in Tyk." +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Open Authentication, Keyless Authentication" +sidebarTitle: "No Authentication" +--- + +Open or keyless authentication allows access to APIs without any authentication. This method is suitable for public APIs where access control is not required. + +Tyk OAS APIs are inherently "open" unless authentication is configured, however the older Tyk Classic API applies [auth token](/api-management/authentication/bearer-token) protection by default. + +You can disable authentication for a Tyk Classic API by setting the `use_keyless` flag in the API definition. 
+ + diff --git a/basic-config-and-security/security/mutual-tls/client-mtls.mdx b/basic-config-and-security/security/mutual-tls/client-mtls.mdx new file mode 100644 index 000000000..86b896fc7 --- /dev/null +++ b/basic-config-and-security/security/mutual-tls/client-mtls.mdx @@ -0,0 +1,313 @@ +--- +title: "Authentication using Mutual TLS" +description: "How to configure Mutual TLS (mTLS) for client authentication in Tyk." +keywords: "Authentication, Authorization, Tyk Authentication, Tyk Authorization, Mutual TLS, mTLS, Client mTLS" +sidebarTitle: "Mutual TLS" +--- + +## Introduction + +Mutual TLS (mTLS) is a robust security feature that ensures both the client and server authenticate each other using TLS certificates. This two-way authentication process provides enhanced security for API communications by cryptographically verifying the identity of both parties involved in the connection. + +In most cases when you try to access a secured HTTPS/TLS endpoint, you experience only the client-side check of the server certificate. The purpose of this check is to ensure that no fraud is involved and the data transfer between the client and server is encrypted. In fact, the TLS standard allows specifying the client certificate as well, so the server can accept connections only for clients with certificates registered with the server certificate authority, or provide additional security checks based on the information stored in the client certificate. This is what we call "Mutual TLS" - when both sides of the connection verify certificates. See the video below that gives you an introduction to mutual TLS and how it can be used to secure your APIs. + + + +## Why Use Mutual TLS? + +Mutual TLS is particularly valuable in environments where security is paramount, such as microservices architectures, financial services, healthcare, and any scenario requiring zero-trust security. 
It not only encrypts the data in transit but also ensures that the communicating parties are who they claim to be, mitigating the risks of unauthorized access and data breaches. + +* **Enhanced Security:** Provides two-way authentication, ensuring both the client and server are verified and trusted. +* **Data Integrity:** Protects the data exchanged between client and server by encrypting it, preventing tampering or interception. +* **Compliance:** Helps meet stringent security and compliance requirements, especially in regulated industries. + +## Client mTLS for Tyk Cloud + +Tyk Cloud users cannot currently use mTLS to secure the client to Gateway communication for Tyk-hosted gateways. + + +Tyk Hybrid users can, however, use mTLS with their self-hosted gateways. + + +## How Does Mutual TLS Work? + +Mutual TLS operates by requiring both the client and server to present and verify TLS certificates during the handshake process. Here’s how it works: + +**Client Authentication:** + +1. When a client attempts to connect to the server, the server requests the client’s TLS certificate. +2. The client provides its certificate, which the server verifies against a trusted Certificate Authority (CA). + +**Server Authentication:** + +1. Simultaneously, the server provides its own certificate to the client, which the client verifies against a trusted CA. + +This mutual verification ensures that both parties are legitimate, securing the connection from both ends. + +### Client authorization with mTLS +At the TLS level, authorization means only allowing access for clients who provide client certificates that are verified and trusted by the server. + +Tyk allows you to define a list of trusted certificates at the API level or Gateway (global) level. If you are updating the API definition programmatically or via files, you need to set the following keys in your API +definition: +`use_mutual_tls_auth` to `true`, and `client_certificates` as an array of strings - certificate IDs. 
+ +From the Tyk Dashboard, to do the same from the **API Designer Core settings** section you need to select **Mutual TLS** authentication mode from the **Authentication** section, and allow the certificates using the built-in widget, as below: + +mutual_tls_auth + +If all your APIs have a common set of certificates, you can define them in your Gateway configuration file via the `security.certificates.apis` key - string array of certificate IDs or paths. + +Select **Strip Authorization Data** to strip any authorization data from your API requests. + +Be aware that mutual TLS authorization has special treatment because it is not "authentication" and does not provide any identifying functionality, like keys, so you need to mix it with other authentication modes, such as **Auth Key** or **Keyless**. On the dashboard, you need to choose **Use multiple auth mechanism** in the **Authentication mode** drop-down, where you should select **Mutual TLS** and another option which suits your use-case. + +### Fallback to HTTP Authorization +The TLS protocol has no access to the HTTP payload and works on the lower level; thus the only information we have at the TLS handshake level is the domain. In fact, even a domain is not included in a TLS handshake by default, but there is a TLS extension called SNI (Server Name Indication) +which allows the client to send the domain name at the TLS handshake level. + +With this in mind, the only way to make API authorization work fully at the TLS level is to deploy each API protected by Mutual TLS on its own domain. + +However, Tyk will gracefully fall back to client certificate authorization at the HTTP level in cases when you want to have multiple mutual TLS protected APIs on the same domain, or you have clients that do not support the SNI extension. No additional configuration is needed. In case of such fallback, +instead of getting a TLS error, a client will receive a 403 HTTP error. 
+ +### Authentication +Tyk can be configured to guess a user authentication key based on the provided client certificate. In other words, a user does not need to provide any key, except the certificate, and Tyk will be able to identify the user, apply policies, and do the monitoring - the same as with regular Keys. + +### Using with Authorization +Mutual TLS authentication does not require mutual TLS authorization to be turned on, and can be used separately. For example, you may allow some of the users to be authenticated by using a token in the header or similar, and some of the users via client certificates. + +If you want to use them both, just configure them separately. No additional knowledge is required. + +### Dynamic vs Static mTLS + +There are two ways to set up client mTLS in Tyk: static and dynamic. Each method is suited to different use cases, as outlined below: + +| Use Case | Static | Dynamic | +| ------------------------------------------------------------------ | :----: | :-----: | +| Let developers upload their own public certificates through the Developer Portal | ❌ | βœ… | +| Combine client mTLS with another authentication method | βœ… | βœ… | +| Allow certs at the API level (one or more APIs per cert) | βœ… | ❌ | +| Allow certs at an individual level (one or more APIs per cert) | ❌ | βœ… | + +## Dynamic mTLS + +Dynamic Client mTLS in Tyk allows you to authenticate users based solely on the provided client certificate, without the need for an additional authentication key. Tyk can identify the user, apply policies, and monitor usage just as with regular API keys. + +To set up Dynamic Client mTLS, we need to follow these steps: +* Protect the API: Configure the API in the API Designer by setting the authentication type to Auth Token and enabling Client Certificate. + +* Generate a Self-Signed Certificate: Use OpenSSL to generate a self-signed certificate and key if you don't have one. 
+ +* Add a Key in the Dashboard: In the Tyk Dashboard, create a key for the API and upload only the public certificate. + +* Make an API Request: Use curl with your certificate and key to make an API request to the protected API, ensuring the request returns a 200 response. + +* Allow Developers to Upload Certificates: Create a policy and catalog entry for the API, allowing developers to request keys and upload their public certificates through the Developer Portal. Developers can then make API requests using their cert and private key. + + +### Protect the API + +In the API Designer, set the Authentication Type to Auth Token under Target Details > Authentication mode. Then select Enable Client Certificate. + +Enable Client Certificate + +### Generate a Self-Signed Key Pair + +If you don’t already have a certificate, generate a self-signed key pair using the following command: + +```bash +openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes +``` + +### Add a Key through the Dashboard + +In the Tyk Dashboard, add a key for the API you set up in step #1. When uploading the certificate, ensure you only upload the public certificate. + + + + +The certificate you upload for this key must only be the public certificate. + + + + +### Make an API Request Using the Certificate + +Now you can make a cURL request to the API using the certificate and private key: + +```bash +curl -k --cert cert.pem --key key.pem https://localhost:8080/mtls-api/my-endpoint +``` + +A successful request should return a 200 response. + +### Allow Developers to Upload Certificates + +Instead of manually creating keys, you can allow developers to upload their own certificates via the Developer Portal. + +1. **Create a Policy:** Create a policy for the API you set up earlier. +2. **Create a Catalog Entry:** Create a catalog entry for this policy. +3. **Request a Key through the Portal:** As a developer, request a key for the API through the Portal. 
This will present a screen where the developer can upload their public certificate. + +portal_cert_request + +Add your public cert (cert.pem from above) into here and hit "Request Key". + +4. **Make an API Request Using the Uploaded Certificate:** After adding the public certificate, developers can make API requests using their cert + private key: + + ```bash + curl -k --cert cert.pem --key key.pem https://localhost:8080/mtls-api/my-endpoint + ``` + + A successful request should return a 200 response. + +## Static mTLS + +Static mTLS allows client certificates to be used at the API level. This method is straightforward and can be combined with another authentication method if needed. + +### Configure the API + +In the API authentication settings, choose mTLS as the authentication type and optionally select an additional authentication method. If you want to use only client certificates without another authentication method, select "keyless" as the other option. + +### Set the Base Identity + +The base identity can be anything, as the client certificate will be the primary authentication method. + + +### Setup Static mTLS in Tyk Operator using the Tyk Classic API Definition + +This setup requires mutual TLS (mTLS) for client authentication using specified client certificates. The example provided shows how to create an API definition with mTLS authentication for `httpbin-client-mtls`. + +1. **Generate Self-Signed Key Pair:** + +You can generate a self-signed key pair using the following OpenSSL command: + +```bash +openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes +``` + +2. **Create Kubernetes Secret:** + +Create a secret in Kubernetes to store the client certificate: + +```bash +kubectl create secret tls my-test-tls --cert cert.pem --key key.pem +``` + +3. **Create API Definition:** + +Below is the YAML configuration for an API that uses mTLS authentication. 
Note that the `client_certificate_refs` field references the Kubernetes secret created in the previous step.
+
+```yaml {hl_lines=["19-21"],linenos=false}
+apiVersion: tyk.tyk.io/v1alpha1
+kind: ApiDefinition
+metadata:
+  name: httpbin-client-mtls
+spec:
+  name: Httpbin Client MTLS
+  protocol: http
+  active: true
+  proxy:
+    target_url: http://httpbin.org
+    listen_path: /httpbin
+    strip_listen_path: true
+  version_data:
+    default_version: Default
+    not_versioned: true
+    versions:
+      Default:
+        name: Default
+  use_mutual_tls_auth: true
+  client_certificate_refs:
+    - my-test-tls
+```
+
+### Setup Static mTLS in Tyk Operator using Tyk OAS API Definition
+
+Client certificates, in the Tyk OAS API Definition, are managed using the `TykOasApiDefinition` CRD. You can reference Kubernetes secrets that store client certificates in your API definitions.
+
+**Example of Referencing Client Certificates in Tyk OAS**
+
+In this example, the `clientCertificate` section allows you to enable client certificate management and specify a list of Kubernetes secrets (`tls-cert`) that store allowed client certificates.
+
+```yaml {hl_lines=["48-50"],linenos=false}
+# Secret is not created in this manifest.
+# Please store client certificate in k8s TLS secret `tls-cert`.
+ +apiVersion: v1 +data: + test_oas.json: |- + { + "info": { + "title": "Petstore", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } + } +kind: ConfigMap +metadata: + name: cm + namespace: default +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: petstore +spec: + tykOAS: + configmapRef: + name: cm + namespace: default + keyName: test_oas.json + clientCertificate: + enabled: true + allowlist: [tls-cert] +``` + + +## FAQ + +* **Why am I getting an error stating that certificates are not enabled for this API?** + + This issue can occur because client mTLS is an extension of Auth Token authentication mode. To enable this feature, ensure the API definition has `auth.use_certificate` set to `true`. + +* **Can I upload a full certificate chain when creating a key for dynamic client mTLS?** + + Yes, you can do this when manually creating a key as an Admin Dashboard user. However, through the Portal, you must upload only the public key (certificate). + +* **Can I use a root CA with client mTLS?** + + Yes, Tyk allows you to upload a root CA certificate for static mTLS authentication. This setup allows clients with certificates signed by the registered CA to be validated. + + **Key Points:** + + * The root CA certificate can be uploaded as a client certificate. + * Clients presenting certificates signed by this CA will be validated. + * Tyk traverses the certificate chain for validation. + + + + Root CA certificates are compatible only with Static mTLS and not with Dynamic mTLS. 
+
+
+
+
diff --git a/branches-config.json b/branches-config.json
index 35b9d06a8..b3da5ab58 100644
--- a/branches-config.json
+++ b/branches-config.json
@@ -1,18 +1,35 @@
 {
   "versions": [
     {
-      "branch": "release-5.8",
+      "branch": "release-5.10",
       "isLatest": true,
-      "folder": "5.8",
-      "label": "v5.8 (Latest)"
+      "sourceFolder": "5.10-source",
+      "targetFolder": "5.10",
+      "label": "v5.10 (latest)"
     },
     {
       "branch": "main",
       "isLatest": false,
       "isMain": true,
-      "folder": "nightly",
+      "sourceFolder": "nightly-source",
+      "targetFolder": "nightly",
       "label": "Nightly"
     },
+    {
+      "branch": "release-5.9",
+      "isLatest": false,
+      "sourceFolder": "5.9-source",
+      "targetFolder": "5.9",
+      "label": "v5.9"
+    },
+    {
+      "branch": "release-5.8",
+      "isLatest": false,
+      "isMain": false,
+      "sourceFolder": "5.8-source",
+      "targetFolder": "5.8",
+      "label": "v5.8 (LTS)"
+    },
     {
       "isExternal": true,
       "externalUrl": "https://tyk.io/docs/5.7",
diff --git a/calculator.js b/calculator.js
new file mode 100644
index 000000000..2f6a9d113
--- /dev/null
+++ b/calculator.js
@@ -0,0 +1,3 @@
+/*! 
z(k.values) : Promise.resolve() })), L = (0, r.useCallback)((function (e) { E({ type: "SET_ERRORS", payload: e }) }), []), I = br((function (e, t) { var r = Zn(e) ? e(k.values) : e; return E({ type: "SET_VALUES", payload: r }), (void 0 === t ? n : t) ? z(r) : Promise.resolve() })), F = (0, r.useCallback)((function (e, t) { E({ type: "SET_FIELD_ERROR", payload: { field: e, value: t } }) }), []), D = br((function (e, t, r) { return E({ type: "SET_FIELD_VALUE", payload: { field: e, value: t } }), (void 0 === r ? n : r) ? z(lr(k.values, e, t)) : Promise.resolve() })), U = (0, r.useCallback)((function (e, t) { var n, r = t, a = e; if (!tr(e)) { e.persist && e.persist(); var l = e.target ? e.target : e.currentTarget, o = l.type, i = l.name, u = l.id, s = l.value, c = l.checked, f = (l.outerHTML, l.options), d = l.multiple; r = t || (i || u), a = /number|range/.test(o) ? (n = parseFloat(s), isNaN(n) ? "" : n) : /checkbox/.test(o) ? function (e, t, n) { if ("boolean" === typeof e) return Boolean(t); var r = [], a = !1, l = -1; if (Array.isArray(e)) r = e, a = (l = e.indexOf(n)) >= 0; else if (!n || "true" == n || "false" == n) return Boolean(t); if (t && n && !a) return r.concat(n); if (!a) return r; return r.slice(0, l).concat(r.slice(l + 1)) }(ar(k.values, r), c, s) : f && d ? function (e) { return Array.from(e).filter((function (e) { return e.selected })).map((function (e) { return e.value })) }(f) : s } r && D(r, a) }), [D, k.values]), V = br((function (e) { if (tr(e)) return function (t) { return U(t, e) }; U(e) })), B = br((function (e, t, n) { return void 0 === t && (t = !0), E({ type: "SET_FIELD_TOUCHED", payload: { field: e, value: t } }), (void 0 === n ? o : n) ? 
z(k.values) : Promise.resolve() })), $ = (0, r.useCallback)((function (e, t) { e.persist && e.persist(); var n = e.target, r = n.name, a = n.id, l = (n.outerHTML, t || (r || a)); B(l, !0) }), [B]), q = br((function (e) { if (tr(e)) return function (t) { return $(t, e) }; $(e) })), W = (0, r.useCallback)((function (e) { Zn(e) ? E({ type: "SET_FORMIK_STATE", payload: e }) : E({ type: "SET_FORMIK_STATE", payload: function () { return e } }) }), []), H = (0, r.useCallback)((function (e) { E({ type: "SET_STATUS", payload: e }) }), []), Q = (0, r.useCallback)((function (e) { E({ type: "SET_ISSUBMITTING", payload: e }) }), []), K = br((function () { return E({ type: "SUBMIT_ATTEMPT" }), z().then((function (e) { var t = e instanceof Error; if (!t && 0 === Object.keys(e).length) { var n; try { if (void 0 === (n = X())) return } catch (r) { throw r } return Promise.resolve(n).then((function (e) { return _.current && E({ type: "SUBMIT_SUCCESS" }), e })).catch((function (e) { if (_.current) throw E({ type: "SUBMIT_FAILURE" }), e })) } if (_.current && (E({ type: "SUBMIT_FAILURE" }), t)) throw e })) })), G = br((function (e) { e && e.preventDefault && Zn(e.preventDefault) && e.preventDefault(), e && e.stopPropagation && Zn(e.stopPropagation) && e.stopPropagation(), K().catch((function (e) { console.warn("Warning: An unhandled error was caught from submitForm()", e) })) })), Y = { resetForm: j, validateForm: z, validateField: R, setErrors: L, setFieldError: F, setFieldTouched: B, setFieldValue: D, setStatus: H, setSubmitting: Q, setTouched: M, setValues: I, setFormikState: W, submitForm: K }, X = br((function () { return p(k.values, Y) })), Z = br((function (e) { e && e.preventDefault && Zn(e.preventDefault) && e.preventDefault(), e && e.stopPropagation && Zn(e.stopPropagation) && e.stopPropagation(), j() })), J = (0, r.useCallback)((function (e) { return { value: ar(k.values, e), error: ar(k.errors, e), touched: !!ar(k.touched, e), initialValue: ar(y.current, e), 
initialTouched: !!ar(b.current, e), initialError: ar(v.current, e) } }), [k.errors, k.touched, k.values]), ee = (0, r.useCallback)((function (e) { return { setValue: function (t, n) { return D(e, t, n) }, setTouched: function (t, n) { return B(e, t, n) }, setError: function (t) { return F(e, t) } } }), [D, B, F]), te = (0, r.useCallback)((function (e) { var t = Jn(e), n = t ? e.name : e, r = ar(k.values, n), a = { name: n, value: r, onChange: V, onBlur: q }; if (t) { var l = e.type, o = e.value, i = e.as, u = e.multiple; "checkbox" === l ? void 0 === o ? a.checked = !!r : (a.checked = !(!Array.isArray(r) || !~r.indexOf(o)), a.value = o) : "radio" === l ? (a.checked = r === o, a.value = o) : "select" === i && u && (a.value = a.value || [], a.multiple = !0) } return a }), [q, V, k.values]), ne = (0, r.useMemo)((function () { return !l()(y.current, k.values) }), [y.current, k.values]), re = (0, r.useMemo)((function () { return "undefined" !== typeof s ? ne ? k.errors && 0 === Object.keys(k.errors).length : !1 !== s && Zn(s) ? s(h) : s : k.errors && 0 === Object.keys(k.errors).length }), [s, ne, k.errors, h]); return Qn({}, k, { initialValues: y.current, initialErrors: v.current, initialTouched: b.current, initialStatus: g.current, handleBlur: q, handleChange: V, handleReset: Z, handleSubmit: G, resetForm: j, setErrors: L, setFormikState: W, setFieldTouched: B, setFieldValue: D, setFieldError: F, setStatus: H, setSubmitting: Q, setTouched: M, setValues: I, submitForm: K, validateForm: z, validateField: R, isValid: re, dirty: ne, unregisterField: N, registerField: A, getFieldProps: te, getFieldMeta: J, getFieldHelpers: ee, validateOnBlur: o, validateOnChange: n, validateOnMount: u }) } function mr(e) { var t = pr(e), n = e.component, a = e.children, l = e.render, o = e.innerRef; return (0, r.useImperativeHandle)(o, (function () { return t })), (0, r.createElement)(ur, { value: t }, n ? (0, r.createElement)(n, t) : l ? l(t) : a ? Zn(a) ? a(t) : nr(a) ? 
null : r.Children.only(a) : null) } function hr(e) { var t = Array.isArray(e) ? [] : {}; for (var n in e) if (Object.prototype.hasOwnProperty.call(e, n)) { var r = String(n); !0 === Array.isArray(e[r]) ? t[r] = e[r].map((function (e) { return !0 === Array.isArray(e) || A(e) ? hr(e) : "" !== e ? e : void 0 })) : A(e[r]) ? t[r] = hr(e[r]) : t[r] = "" !== e[r] ? e[r] : void 0 } return t } function yr(e, t, n) { var r = e.slice(); return t.forEach((function (t, a) { if ("undefined" === typeof r[a]) { var l = !1 !== n.clone && n.isMergeableObject(t); r[a] = l ? f(Array.isArray(t) ? [] : {}, t, n) : t } else n.isMergeableObject(t) ? r[a] = f(e[a], t, n) : -1 === e.indexOf(t) && r.push(t) })), r } var vr = "undefined" !== typeof window && "undefined" !== typeof window.document && "undefined" !== typeof window.document.createElement ? r.useLayoutEffect : r.useEffect; function br(e) { var t = (0, r.useRef)(e); return vr((function () { t.current = e })), (0, r.useCallback)((function () { for (var e = arguments.length, n = new Array(e), r = 0; r < e; r++)n[r] = arguments[r]; return t.current.apply(void 0, n) }), []) } function gr(e) { var t = e.validate, n = e.name, a = e.render, l = e.children, o = e.as, i = e.component, u = Gn(e, ["validate", "name", "render", "children", "as", "component"]), s = Gn(sr(), ["validate", "validationSchema"]); var c = s.registerField, f = s.unregisterField; (0, r.useEffect)((function () { return c(n, { validate: t }), function () { f(n) } }), [c, f, n, t]); var d = s.getFieldProps(Qn({ name: n }, u)), p = s.getFieldMeta(n), m = { field: d, form: s }; if (a) return a(Qn({}, m, { meta: p })); if (Zn(l)) return l(Qn({}, m, { meta: p })); if (i) { if ("string" === typeof i) { var h = u.innerRef, y = Gn(u, ["innerRef"]); return (0, r.createElement)(i, Qn({ ref: h }, d, y), l) } return (0, r.createElement)(i, Qn({ field: d, form: s }, u), l) } var v = o || "input"; if ("string" === typeof v) { var b = u.innerRef, g = Gn(u, ["innerRef"]); return (0, 
r.createElement)(v, Qn({ ref: b }, d, g), l) } return (0, r.createElement)(v, Qn({}, d, u), l) } var _r = (0, r.forwardRef)((function (e, t) { var n = e.action, a = Gn(e, ["action"]), l = null != n ? n : "#", o = sr(), i = o.handleReset, u = o.handleSubmit; return (0, r.createElement)("form", Object.assign({ onSubmit: u, ref: t, onReset: i, action: l }, a)) })); _r.displayName = "Form"; var Sr = function (e, t, n) { var r = wr(e); return r.splice(t, 0, n), r }, wr = function (e) { if (e) { if (Array.isArray(e)) return [].concat(e); var t = Object.keys(e).map((function (e) { return parseInt(e) })).reduce((function (e, t) { return t > e ? t : e }), 0); return Array.from(Qn({}, e, { length: t + 1 })) } return [] }, kr = function (e) { function t(t) { var n; return (n = e.call(this, t) || this).updateArrayField = function (e, t, r) { var a = n.props, l = a.name; (0, a.formik.setFormikState)((function (n) { var a = "function" === typeof r ? r : e, o = "function" === typeof t ? t : e, i = lr(n.values, l, e(ar(n.values, l))), u = r ? a(ar(n.errors, l)) : void 0, s = t ? o(ar(n.touched, l)) : void 0; return Xn(u) && (u = void 0), Xn(s) && (s = void 0), Qn({}, n, { values: i, errors: r ? lr(n.errors, l, u) : n.errors, touched: t ? 
lr(n.touched, l, s) : n.touched }) })) }, n.push = function (e) { return n.updateArrayField((function (t) { return [].concat(wr(t), [Hn(e)]) }), !1, !1) }, n.handlePush = function (e) { return function () { return n.push(e) } }, n.swap = function (e, t) { return n.updateArrayField((function (n) { return function (e, t, n) { var r = wr(e), a = r[t]; return r[t] = r[n], r[n] = a, r }(n, e, t) }), !0, !0) }, n.handleSwap = function (e, t) { return function () { return n.swap(e, t) } }, n.move = function (e, t) { return n.updateArrayField((function (n) { return function (e, t, n) { var r = wr(e), a = r[t]; return r.splice(t, 1), r.splice(n, 0, a), r }(n, e, t) }), !0, !0) }, n.handleMove = function (e, t) { return function () { return n.move(e, t) } }, n.insert = function (e, t) { return n.updateArrayField((function (n) { return Sr(n, e, t) }), (function (t) { return Sr(t, e, null) }), (function (t) { return Sr(t, e, null) })) }, n.handleInsert = function (e, t) { return function () { return n.insert(e, t) } }, n.replace = function (e, t) { return n.updateArrayField((function (n) { return function (e, t, n) { var r = wr(e); return r[t] = n, r }(n, e, t) }), !1, !1) }, n.handleReplace = function (e, t) { return function () { return n.replace(e, t) } }, n.unshift = function (e) { var t = -1; return n.updateArrayField((function (n) { var r = n ? [e].concat(n) : [e]; return t < 0 && (t = r.length), r }), (function (e) { var n = e ? [null].concat(e) : [null]; return t < 0 && (t = n.length), n }), (function (e) { var n = e ? 
[null].concat(e) : [null]; return t < 0 && (t = n.length), n })), t }, n.handleUnshift = function (e) { return function () { return n.unshift(e) } }, n.handleRemove = function (e) { return function () { return n.remove(e) } }, n.handlePop = function () { return function () { return n.pop() } }, n.remove = n.remove.bind(Yn(n)), n.pop = n.pop.bind(Yn(n)), n } Kn(t, e); var n = t.prototype; return n.componentDidUpdate = function (e) { this.props.validateOnChange && this.props.formik.validateOnChange && !l()(ar(e.formik.values, e.name), ar(this.props.formik.values, this.props.name)) && this.props.formik.validateForm(this.props.formik.values) }, n.remove = function (e) { var t; return this.updateArrayField((function (n) { var r = n ? wr(n) : []; return t || (t = r[e]), Zn(r.splice) && r.splice(e, 1), r }), !0, !0), t }, n.pop = function () { var e; return this.updateArrayField((function (t) { var n = t; return e || (e = n && n.pop && n.pop()), n }), !0, !0), e }, n.render = function () { var e = { push: this.push, pop: this.pop, swap: this.swap, move: this.move, insert: this.insert, replace: this.replace, unshift: this.unshift, remove: this.remove, handlePush: this.handlePush, handlePop: this.handlePop, handleSwap: this.handleSwap, handleMove: this.handleMove, handleInsert: this.handleInsert, handleReplace: this.handleReplace, handleUnshift: this.handleUnshift, handleRemove: this.handleRemove }, t = this.props, n = t.component, a = t.render, l = t.children, o = t.name, i = Qn({}, e, { form: Gn(t.formik, ["validate", "validationSchema"]), name: o }); return n ? (0, r.createElement)(n, i) : a ? a(i) : l ? "function" === typeof l ? l(i) : nr(l) ? 
null : r.Children.only(l) : null }, t }(r.Component); kr.defaultProps = { validateOnChange: !0 } }, 219: (e, t, n) => { var r = n(86), a = { childContextTypes: !0, contextType: !0, contextTypes: !0, defaultProps: !0, displayName: !0, getDefaultProps: !0, getDerivedStateFromError: !0, getDerivedStateFromProps: !0, mixins: !0, propTypes: !0, type: !0 }, l = { name: !0, length: !0, prototype: !0, caller: !0, callee: !0, arguments: !0, arity: !0 }, o = { $$typeof: !0, compare: !0, defaultProps: !0, displayName: !0, propTypes: !0, type: !0 }, i = {}; function u(e) { return r.isMemo(e) ? o : i[e.$$typeof] || a } i[r.ForwardRef] = { $$typeof: !0, render: !0, defaultProps: !0, displayName: !0, propTypes: !0 }, i[r.Memo] = o; var s = Object.defineProperty, c = Object.getOwnPropertyNames, f = Object.getOwnPropertySymbols, d = Object.getOwnPropertyDescriptor, p = Object.getPrototypeOf, m = Object.prototype; e.exports = function e(t, n, r) { if ("string" !== typeof n) { if (m) { var a = p(n); a && a !== m && e(t, a, r) } var o = c(n); f && (o = o.concat(f(n))); for (var i = u(t), h = u(n), y = 0; y < o.length; ++y) { var v = o[y]; if (!l[v] && (!r || !r[v]) && (!h || !h[v]) && (!i || !i[v])) { var b = d(n, v); try { s(t, v, b) } catch (g) { } } } } return t } }, 730: (e, t, n) => { var r = n(43), a = n(853); function l(e) { for (var t = "https://reactjs.org/docs/error-decoder.html?invariant=" + e, n = 1; n < arguments.length; n++)t += "&args[]=" + encodeURIComponent(arguments[n]); return "Minified React error #" + e + "; visit " + t + " for the full message or use the non-minified dev environment for full errors and additional helpful warnings." 
} var o = new Set, i = {}; function u(e, t) { s(e, t), s(e + "Capture", t) } function s(e, t) { for (i[e] = t, e = 0; e < t.length; e++)o.add(t[e]) } var c = !("undefined" === typeof window || "undefined" === typeof window.document || "undefined" === typeof window.document.createElement), f = Object.prototype.hasOwnProperty, d = /^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/, p = {}, m = {}; function h(e, t, n, r, a, l, o) { this.acceptsBooleans = 2 === t || 3 === t || 4 === t, this.attributeName = r, this.attributeNamespace = a, this.mustUseProperty = n, this.propertyName = e, this.type = t, this.sanitizeURL = l, this.removeEmptyString = o } var y = {}; "children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach((function (e) { y[e] = new h(e, 0, !1, e, null, !1, !1) })), [["acceptCharset", "accept-charset"], ["className", "class"], ["htmlFor", "for"], ["httpEquiv", "http-equiv"]].forEach((function (e) { var t = e[0]; y[t] = new h(t, 1, !1, e[1], null, !1, !1) })), ["contentEditable", "draggable", "spellCheck", "value"].forEach((function (e) { y[e] = new h(e, 2, !1, e.toLowerCase(), null, !1, !1) })), ["autoReverse", "externalResourcesRequired", "focusable", "preserveAlpha"].forEach((function (e) { y[e] = new h(e, 2, !1, e, null, !1, !1) })), "allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach((function (e) { y[e] = new h(e, 3, !1, e.toLowerCase(), null, !1, !1) })), 
["checked", "multiple", "muted", "selected"].forEach((function (e) { y[e] = new h(e, 3, !0, e, null, !1, !1) })), ["capture", "download"].forEach((function (e) { y[e] = new h(e, 4, !1, e, null, !1, !1) })), ["cols", "rows", "size", "span"].forEach((function (e) { y[e] = new h(e, 6, !1, e, null, !1, !1) })), ["rowSpan", "start"].forEach((function (e) { y[e] = new h(e, 5, !1, e.toLowerCase(), null, !1, !1) })); var v = /[\-:]([a-z])/g; function b(e) { return e[1].toUpperCase() } function g(e, t, n, r) { var a = y.hasOwnProperty(t) ? y[t] : null; (null !== a ? 0 !== a.type : r || !(2 < t.length) || "o" !== t[0] && "O" !== t[0] || "n" !== t[1] && "N" !== t[1]) && (function (e, t, n, r) { if (null === t || "undefined" === typeof t || function (e, t, n, r) { if (null !== n && 0 === n.type) return !1; switch (typeof t) { case "function": case "symbol": return !0; case "boolean": return !r && (null !== n ? !n.acceptsBooleans : "data-" !== (e = e.toLowerCase().slice(0, 5)) && "aria-" !== e); default: return !1 } }(e, t, n, r)) return !0; if (r) return !1; if (null !== n) switch (n.type) { case 3: return !t; case 4: return !1 === t; case 5: return isNaN(t); case 6: return isNaN(t) || 1 > t }return !1 }(t, n, a, r) && (n = null), r || null === a ? function (e) { return !!f.call(m, e) || !f.call(p, e) && (d.test(e) ? m[e] = !0 : (p[e] = !0, !1)) }(t) && (null === n ? e.removeAttribute(t) : e.setAttribute(t, "" + n)) : a.mustUseProperty ? e[a.propertyName] = null === n ? 3 !== a.type && "" : n : (t = a.attributeName, r = a.attributeNamespace, null === n ? e.removeAttribute(t) : (n = 3 === (a = a.type) || 4 === a && !0 === n ? "" : "" + n, r ? 
e.setAttributeNS(r, t, n) : e.setAttribute(t, n)))) } "accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach((function (e) { var t = e.replace(v, b); y[t] = new h(t, 1, !1, e, null, !1, !1) })), "xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach((function (e) { var t = e.replace(v, b); y[t] = new h(t, 1, !1, e, "http://www.w3.org/1999/xlink", !1, !1) })), ["xml:base", "xml:lang", "xml:space"].forEach((function (e) { var t = e.replace(v, b); y[t] = new h(t, 1, !1, e, "http://www.w3.org/XML/1998/namespace", !1, !1) })), ["tabIndex", "crossOrigin"].forEach((function (e) { y[e] = new h(e, 1, !1, e.toLowerCase(), null, !1, !1) })), y.xlinkHref = new h("xlinkHref", 1, !1, "xlink:href", "http://www.w3.org/1999/xlink", !0, !1), ["src", "href", "action", "formAction"].forEach((function (e) { y[e] = new h(e, 1, !1, e.toLowerCase(), null, !0, !0) })); var _ = 
r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED, S = Symbol.for("react.element"), w = Symbol.for("react.portal"), k = Symbol.for("react.fragment"), E = Symbol.for("react.strict_mode"), x = Symbol.for("react.profiler"), C = Symbol.for("react.provider"), T = Symbol.for("react.context"), P = Symbol.for("react.forward_ref"), O = Symbol.for("react.suspense"), z = Symbol.for("react.suspense_list"), j = Symbol.for("react.memo"), R = Symbol.for("react.lazy"); Symbol.for("react.scope"), Symbol.for("react.debug_trace_mode"); var A = Symbol.for("react.offscreen"); Symbol.for("react.legacy_hidden"), Symbol.for("react.cache"), Symbol.for("react.tracing_marker"); var N = Symbol.iterator; function M(e) { return null === e || "object" !== typeof e ? null : "function" === typeof (e = N && e[N] || e["@@iterator"]) ? e : null } var L, I = Object.assign; function F(e) { if (void 0 === L) try { throw Error() } catch (n) { var t = n.stack.trim().match(/\n( *(at )?)/); L = t && t[1] || "" } return "\n" + L + e } var D = !1; function U(e, t) { if (!e || D) return ""; D = !0; var n = Error.prepareStackTrace; Error.prepareStackTrace = void 0; try { if (t) if (t = function () { throw Error() }, Object.defineProperty(t.prototype, "props", { set: function () { throw Error() } }), "object" === typeof Reflect && Reflect.construct) { try { Reflect.construct(t, []) } catch (s) { var r = s } Reflect.construct(e, [], t) } else { try { t.call() } catch (s) { r = s } e.call(t.prototype) } else { try { throw Error() } catch (s) { r = s } e() } } catch (s) { if (s && r && "string" === typeof s.stack) { for (var a = s.stack.split("\n"), l = r.stack.split("\n"), o = a.length - 1, i = l.length - 1; 1 <= o && 0 <= i && a[o] !== l[i];)i--; for (; 1 <= o && 0 <= i; o--, i--)if (a[o] !== l[i]) { if (1 !== o || 1 !== i) do { if (o--, 0 > --i || a[o] !== l[i]) { var u = "\n" + a[o].replace(" at new ", " at "); return e.displayName && u.includes("") && (u = u.replace("", e.displayName)), u } } while (1 <= o 
&& 0 <= i); break } } } finally { D = !1, Error.prepareStackTrace = n } return (e = e ? e.displayName || e.name : "") ? F(e) : "" } function V(e) { switch (e.tag) { case 5: return F(e.type); case 16: return F("Lazy"); case 13: return F("Suspense"); case 19: return F("SuspenseList"); case 0: case 2: case 15: return e = U(e.type, !1); case 11: return e = U(e.type.render, !1); case 1: return e = U(e.type, !0); default: return "" } } function B(e) { if (null == e) return null; if ("function" === typeof e) return e.displayName || e.name || null; if ("string" === typeof e) return e; switch (e) { case k: return "Fragment"; case w: return "Portal"; case x: return "Profiler"; case E: return "StrictMode"; case O: return "Suspense"; case z: return "SuspenseList" }if ("object" === typeof e) switch (e.$$typeof) { case T: return (e.displayName || "Context") + ".Consumer"; case C: return (e._context.displayName || "Context") + ".Provider"; case P: var t = e.render; return (e = e.displayName) || (e = "" !== (e = t.displayName || t.name || "") ? "ForwardRef(" + e + ")" : "ForwardRef"), e; case j: return null !== (t = e.displayName || null) ? t : B(e.type) || "Memo"; case R: t = e._payload, e = e._init; try { return B(e(t)) } catch (n) { } }return null } function $(e) { var t = e.type; switch (e.tag) { case 24: return "Cache"; case 9: return (t.displayName || "Context") + ".Consumer"; case 10: return (t._context.displayName || "Context") + ".Provider"; case 18: return "DehydratedFragment"; case 11: return e = (e = t.render).displayName || e.name || "", t.displayName || ("" !== e ? "ForwardRef(" + e + ")" : "ForwardRef"); case 7: return "Fragment"; case 5: return t; case 4: return "Portal"; case 3: return "Root"; case 6: return "Text"; case 16: return B(t); case 8: return t === E ? 
"StrictMode" : "Mode"; case 22: return "Offscreen"; case 12: return "Profiler"; case 21: return "Scope"; case 13: return "Suspense"; case 19: return "SuspenseList"; case 25: return "TracingMarker"; case 1: case 0: case 17: case 2: case 14: case 15: if ("function" === typeof t) return t.displayName || t.name || null; if ("string" === typeof t) return t }return null } function q(e) { switch (typeof e) { case "boolean": case "number": case "string": case "undefined": case "object": return e; default: return "" } } function W(e) { var t = e.type; return (e = e.nodeName) && "input" === e.toLowerCase() && ("checkbox" === t || "radio" === t) } function H(e) { e._valueTracker || (e._valueTracker = function (e) { var t = W(e) ? "checked" : "value", n = Object.getOwnPropertyDescriptor(e.constructor.prototype, t), r = "" + e[t]; if (!e.hasOwnProperty(t) && "undefined" !== typeof n && "function" === typeof n.get && "function" === typeof n.set) { var a = n.get, l = n.set; return Object.defineProperty(e, t, { configurable: !0, get: function () { return a.call(this) }, set: function (e) { r = "" + e, l.call(this, e) } }), Object.defineProperty(e, t, { enumerable: n.enumerable }), { getValue: function () { return r }, setValue: function (e) { r = "" + e }, stopTracking: function () { e._valueTracker = null, delete e[t] } } } }(e)) } function Q(e) { if (!e) return !1; var t = e._valueTracker; if (!t) return !0; var n = t.getValue(), r = ""; return e && (r = W(e) ? e.checked ? "true" : "false" : e.value), (e = r) !== n && (t.setValue(e), !0) } function K(e) { if ("undefined" === typeof (e = e || ("undefined" !== typeof document ? document : void 0))) return null; try { return e.activeElement || e.body } catch (t) { return e.body } } function G(e, t) { var n = t.checked; return I({}, t, { defaultChecked: void 0, defaultValue: void 0, value: void 0, checked: null != n ? n : e._wrapperState.initialChecked }) } function Y(e, t) { var n = null == t.defaultValue ? 
"" : t.defaultValue, r = null != t.checked ? t.checked : t.defaultChecked; n = q(null != t.value ? t.value : n), e._wrapperState = { initialChecked: r, initialValue: n, controlled: "checkbox" === t.type || "radio" === t.type ? null != t.checked : null != t.value } } function X(e, t) { null != (t = t.checked) && g(e, "checked", t, !1) } function Z(e, t) { X(e, t); var n = q(t.value), r = t.type; if (null != n) "number" === r ? (0 === n && "" === e.value || e.value != n) && (e.value = "" + n) : e.value !== "" + n && (e.value = "" + n); else if ("submit" === r || "reset" === r) return void e.removeAttribute("value"); t.hasOwnProperty("value") ? ee(e, t.type, n) : t.hasOwnProperty("defaultValue") && ee(e, t.type, q(t.defaultValue)), null == t.checked && null != t.defaultChecked && (e.defaultChecked = !!t.defaultChecked) } function J(e, t, n) { if (t.hasOwnProperty("value") || t.hasOwnProperty("defaultValue")) { var r = t.type; if (!("submit" !== r && "reset" !== r || void 0 !== t.value && null !== t.value)) return; t = "" + e._wrapperState.initialValue, n || t === e.value || (e.value = t), e.defaultValue = t } "" !== (n = e.name) && (e.name = ""), e.defaultChecked = !!e._wrapperState.initialChecked, "" !== n && (e.name = n) } function ee(e, t, n) { "number" === t && K(e.ownerDocument) === e || (null == n ? 
e.defaultValue = "" + e._wrapperState.initialValue : e.defaultValue !== "" + n && (e.defaultValue = "" + n)) } var te = Array.isArray; function ne(e, t, n, r) { if (e = e.options, t) { t = {}; for (var a = 0; a < n.length; a++)t["$" + n[a]] = !0; for (n = 0; n < e.length; n++)a = t.hasOwnProperty("$" + e[n].value), e[n].selected !== a && (e[n].selected = a), a && r && (e[n].defaultSelected = !0) } else { for (n = "" + q(n), t = null, a = 0; a < e.length; a++) { if (e[a].value === n) return e[a].selected = !0, void (r && (e[a].defaultSelected = !0)); null !== t || e[a].disabled || (t = e[a]) } null !== t && (t.selected = !0) } } function re(e, t) { if (null != t.dangerouslySetInnerHTML) throw Error(l(91)); return I({}, t, { value: void 0, defaultValue: void 0, children: "" + e._wrapperState.initialValue }) } function ae(e, t) { var n = t.value; if (null == n) { if (n = t.children, t = t.defaultValue, null != n) { if (null != t) throw Error(l(92)); if (te(n)) { if (1 < n.length) throw Error(l(93)); n = n[0] } t = n } null == t && (t = ""), n = t } e._wrapperState = { initialValue: q(n) } } function le(e, t) { var n = q(t.value), r = q(t.defaultValue); null != n && ((n = "" + n) !== e.value && (e.value = n), null == t.defaultValue && e.defaultValue !== n && (e.defaultValue = n)), null != r && (e.defaultValue = "" + r) } function oe(e) { var t = e.textContent; t === e._wrapperState.initialValue && "" !== t && null !== t && (e.value = t) } function ie(e) { switch (e) { case "svg": return "http://www.w3.org/2000/svg"; case "math": return "http://www.w3.org/1998/Math/MathML"; default: return "http://www.w3.org/1999/xhtml" } } function ue(e, t) { return null == e || "http://www.w3.org/1999/xhtml" === e ? ie(t) : "http://www.w3.org/2000/svg" === e && "foreignObject" === t ? 
"http://www.w3.org/1999/xhtml" : e } var se, ce, fe = (ce = function (e, t) { if ("http://www.w3.org/2000/svg" !== e.namespaceURI || "innerHTML" in e) e.innerHTML = t; else { for ((se = se || document.createElement("div")).innerHTML = "" + t.valueOf().toString() + "", t = se.firstChild; e.firstChild;)e.removeChild(e.firstChild); for (; t.firstChild;)e.appendChild(t.firstChild) } }, "undefined" !== typeof MSApp && MSApp.execUnsafeLocalFunction ? function (e, t, n, r) { MSApp.execUnsafeLocalFunction((function () { return ce(e, t) })) } : ce); function de(e, t) { if (t) { var n = e.firstChild; if (n && n === e.lastChild && 3 === n.nodeType) return void (n.nodeValue = t) } e.textContent = t } var pe = { animationIterationCount: !0, aspectRatio: !0, borderImageOutset: !0, borderImageSlice: !0, borderImageWidth: !0, boxFlex: !0, boxFlexGroup: !0, boxOrdinalGroup: !0, columnCount: !0, columns: !0, flex: !0, flexGrow: !0, flexPositive: !0, flexShrink: !0, flexNegative: !0, flexOrder: !0, gridArea: !0, gridRow: !0, gridRowEnd: !0, gridRowSpan: !0, gridRowStart: !0, gridColumn: !0, gridColumnEnd: !0, gridColumnSpan: !0, gridColumnStart: !0, fontWeight: !0, lineClamp: !0, lineHeight: !0, opacity: !0, order: !0, orphans: !0, tabSize: !0, widows: !0, zIndex: !0, zoom: !0, fillOpacity: !0, floodOpacity: !0, stopOpacity: !0, strokeDasharray: !0, strokeDashoffset: !0, strokeMiterlimit: !0, strokeOpacity: !0, strokeWidth: !0 }, me = ["Webkit", "ms", "Moz", "O"]; function he(e, t, n) { return null == t || "boolean" === typeof t || "" === t ? "" : n || "number" !== typeof t || 0 === t || pe.hasOwnProperty(e) && pe[e] ? ("" + t).trim() : t + "px" } function ye(e, t) { for (var n in e = e.style, t) if (t.hasOwnProperty(n)) { var r = 0 === n.indexOf("--"), a = he(n, t[n], r); "float" === n && (n = "cssFloat"), r ? 
e.setProperty(n, a) : e[n] = a } } Object.keys(pe).forEach((function (e) { me.forEach((function (t) { t = t + e.charAt(0).toUpperCase() + e.substring(1), pe[t] = pe[e] })) })); var ve = I({ menuitem: !0 }, { area: !0, base: !0, br: !0, col: !0, embed: !0, hr: !0, img: !0, input: !0, keygen: !0, link: !0, meta: !0, param: !0, source: !0, track: !0, wbr: !0 }); function be(e, t) { if (t) { if (ve[e] && (null != t.children || null != t.dangerouslySetInnerHTML)) throw Error(l(137, e)); if (null != t.dangerouslySetInnerHTML) { if (null != t.children) throw Error(l(60)); if ("object" !== typeof t.dangerouslySetInnerHTML || !("__html" in t.dangerouslySetInnerHTML)) throw Error(l(61)) } if (null != t.style && "object" !== typeof t.style) throw Error(l(62)) } } function ge(e, t) { if (-1 === e.indexOf("-")) return "string" === typeof t.is; switch (e) { case "annotation-xml": case "color-profile": case "font-face": case "font-face-src": case "font-face-uri": case "font-face-format": case "font-face-name": case "missing-glyph": return !1; default: return !0 } } var _e = null; function Se(e) { return (e = e.target || e.srcElement || window).correspondingUseElement && (e = e.correspondingUseElement), 3 === e.nodeType ? e.parentNode : e } var we = null, ke = null, Ee = null; function xe(e) { if (e = ga(e)) { if ("function" !== typeof we) throw Error(l(280)); var t = e.stateNode; t && (t = Sa(t), we(e.stateNode, e.type, t)) } } function Ce(e) { ke ? Ee ? 
Ee.push(e) : Ee = [e] : ke = e } function Te() { if (ke) { var e = ke, t = Ee; if (Ee = ke = null, xe(e), t) for (e = 0; e < t.length; e++)xe(t[e]) } } function Pe(e, t) { return e(t) } function Oe() { } var ze = !1; function je(e, t, n) { if (ze) return e(t, n); ze = !0; try { return Pe(e, t, n) } finally { ze = !1, (null !== ke || null !== Ee) && (Oe(), Te()) } } function Re(e, t) { var n = e.stateNode; if (null === n) return null; var r = Sa(n); if (null === r) return null; n = r[t]; e: switch (t) { case "onClick": case "onClickCapture": case "onDoubleClick": case "onDoubleClickCapture": case "onMouseDown": case "onMouseDownCapture": case "onMouseMove": case "onMouseMoveCapture": case "onMouseUp": case "onMouseUpCapture": case "onMouseEnter": (r = !r.disabled) || (r = !("button" === (e = e.type) || "input" === e || "select" === e || "textarea" === e)), e = !r; break e; default: e = !1 }if (e) return null; if (n && "function" !== typeof n) throw Error(l(231, t, typeof n)); return n } var Ae = !1; if (c) try { var Ne = {}; Object.defineProperty(Ne, "passive", { get: function () { Ae = !0 } }), window.addEventListener("test", Ne, Ne), window.removeEventListener("test", Ne, Ne) } catch (ce) { Ae = !1 } function Me(e, t, n, r, a, l, o, i, u) { var s = Array.prototype.slice.call(arguments, 3); try { t.apply(n, s) } catch (c) { this.onError(c) } } var Le = !1, Ie = null, Fe = !1, De = null, Ue = { onError: function (e) { Le = !0, Ie = e } }; function Ve(e, t, n, r, a, l, o, i, u) { Le = !1, Ie = null, Me.apply(Ue, arguments) } function Be(e) { var t = e, n = e; if (e.alternate) for (; t.return;)t = t.return; else { e = t; do { 0 !== (4098 & (t = e).flags) && (n = t.return), e = t.return } while (e) } return 3 === t.tag ? 
n : null } function $e(e) { if (13 === e.tag) { var t = e.memoizedState; if (null === t && (null !== (e = e.alternate) && (t = e.memoizedState)), null !== t) return t.dehydrated } return null } function qe(e) { if (Be(e) !== e) throw Error(l(188)) } function We(e) { return null !== (e = function (e) { var t = e.alternate; if (!t) { if (null === (t = Be(e))) throw Error(l(188)); return t !== e ? null : e } for (var n = e, r = t; ;) { var a = n.return; if (null === a) break; var o = a.alternate; if (null === o) { if (null !== (r = a.return)) { n = r; continue } break } if (a.child === o.child) { for (o = a.child; o;) { if (o === n) return qe(a), e; if (o === r) return qe(a), t; o = o.sibling } throw Error(l(188)) } if (n.return !== r.return) n = a, r = o; else { for (var i = !1, u = a.child; u;) { if (u === n) { i = !0, n = a, r = o; break } if (u === r) { i = !0, r = a, n = o; break } u = u.sibling } if (!i) { for (u = o.child; u;) { if (u === n) { i = !0, n = o, r = a; break } if (u === r) { i = !0, r = o, n = a; break } u = u.sibling } if (!i) throw Error(l(189)) } } if (n.alternate !== r) throw Error(l(190)) } if (3 !== n.tag) throw Error(l(188)); return n.stateNode.current === n ? e : t }(e)) ? He(e) : null } function He(e) { if (5 === e.tag || 6 === e.tag) return e; for (e = e.child; null !== e;) { var t = He(e); if (null !== t) return t; e = e.sibling } return null } var Qe = a.unstable_scheduleCallback, Ke = a.unstable_cancelCallback, Ge = a.unstable_shouldYield, Ye = a.unstable_requestPaint, Xe = a.unstable_now, Ze = a.unstable_getCurrentPriorityLevel, Je = a.unstable_ImmediatePriority, et = a.unstable_UserBlockingPriority, tt = a.unstable_NormalPriority, nt = a.unstable_LowPriority, rt = a.unstable_IdlePriority, at = null, lt = null; var ot = Math.clz32 ? Math.clz32 : function (e) { return e >>>= 0, 0 === e ? 
32 : 31 - (it(e) / ut | 0) | 0 }, it = Math.log, ut = Math.LN2; var st = 64, ct = 4194304; function ft(e) { switch (e & -e) { case 1: return 1; case 2: return 2; case 4: return 4; case 8: return 8; case 16: return 16; case 32: return 32; case 64: case 128: case 256: case 512: case 1024: case 2048: case 4096: case 8192: case 16384: case 32768: case 65536: case 131072: case 262144: case 524288: case 1048576: case 2097152: return 4194240 & e; case 4194304: case 8388608: case 16777216: case 33554432: case 67108864: return 130023424 & e; case 134217728: return 134217728; case 268435456: return 268435456; case 536870912: return 536870912; case 1073741824: return 1073741824; default: return e } } function dt(e, t) { var n = e.pendingLanes; if (0 === n) return 0; var r = 0, a = e.suspendedLanes, l = e.pingedLanes, o = 268435455 & n; if (0 !== o) { var i = o & ~a; 0 !== i ? r = ft(i) : 0 !== (l &= o) && (r = ft(l)) } else 0 !== (o = n & ~a) ? r = ft(o) : 0 !== l && (r = ft(l)); if (0 === r) return 0; if (0 !== t && t !== r && 0 === (t & a) && ((a = r & -r) >= (l = t & -t) || 16 === a && 0 !== (4194240 & l))) return t; if (0 !== (4 & r) && (r |= 16 & n), 0 !== (t = e.entangledLanes)) for (e = e.entanglements, t &= r; 0 < t;)a = 1 << (n = 31 - ot(t)), r |= e[n], t &= ~a; return r } function pt(e, t) { switch (e) { case 1: case 2: case 4: return t + 250; case 8: case 16: case 32: case 64: case 128: case 256: case 512: case 1024: case 2048: case 4096: case 8192: case 16384: case 32768: case 65536: case 131072: case 262144: case 524288: case 1048576: case 2097152: return t + 5e3; default: return -1 } } function mt(e) { return 0 !== (e = -1073741825 & e.pendingLanes) ? e : 1073741824 & e ? 
1073741824 : 0 } function ht() { var e = st; return 0 === (4194240 & (st <<= 1)) && (st = 64), e } function yt(e) { for (var t = [], n = 0; 31 > n; n++)t.push(e); return t } function vt(e, t, n) { e.pendingLanes |= t, 536870912 !== t && (e.suspendedLanes = 0, e.pingedLanes = 0), (e = e.eventTimes)[t = 31 - ot(t)] = n } function bt(e, t) { var n = e.entangledLanes |= t; for (e = e.entanglements; n;) { var r = 31 - ot(n), a = 1 << r; a & t | e[r] & t && (e[r] |= t), n &= ~a } } var gt = 0; function _t(e) { return 1 < (e &= -e) ? 4 < e ? 0 !== (268435455 & e) ? 16 : 536870912 : 4 : 1 } var St, wt, kt, Et, xt, Ct = !1, Tt = [], Pt = null, Ot = null, zt = null, jt = new Map, Rt = new Map, At = [], Nt = "mousedown mouseup touchcancel touchend touchstart auxclick dblclick pointercancel pointerdown pointerup dragend dragstart drop compositionend compositionstart keydown keypress keyup input textInput copy cut paste click change contextmenu reset submit".split(" "); function Mt(e, t) { switch (e) { case "focusin": case "focusout": Pt = null; break; case "dragenter": case "dragleave": Ot = null; break; case "mouseover": case "mouseout": zt = null; break; case "pointerover": case "pointerout": jt.delete(t.pointerId); break; case "gotpointercapture": case "lostpointercapture": Rt.delete(t.pointerId) } } function Lt(e, t, n, r, a, l) { return null === e || e.nativeEvent !== l ? (e = { blockedOn: t, domEventName: n, eventSystemFlags: r, nativeEvent: l, targetContainers: [a] }, null !== t && (null !== (t = ga(t)) && wt(t)), e) : (e.eventSystemFlags |= r, t = e.targetContainers, null !== a && -1 === t.indexOf(a) && t.push(a), e) } function It(e) { var t = ba(e.target); if (null !== t) { var n = Be(t); if (null !== n) if (13 === (t = n.tag)) { if (null !== (t = $e(n))) return e.blockedOn = t, void xt(e.priority, (function () { kt(n) })) } else if (3 === t && n.stateNode.current.memoizedState.isDehydrated) return void (e.blockedOn = 3 === n.tag ? 
n.stateNode.containerInfo : null) } e.blockedOn = null } function Ft(e) { if (null !== e.blockedOn) return !1; for (var t = e.targetContainers; 0 < t.length;) { var n = Gt(e.domEventName, e.eventSystemFlags, t[0], e.nativeEvent); if (null !== n) return null !== (t = ga(n)) && wt(t), e.blockedOn = n, !1; var r = new (n = e.nativeEvent).constructor(n.type, n); _e = r, n.target.dispatchEvent(r), _e = null, t.shift() } return !0 } function Dt(e, t, n) { Ft(e) && n.delete(t) } function Ut() { Ct = !1, null !== Pt && Ft(Pt) && (Pt = null), null !== Ot && Ft(Ot) && (Ot = null), null !== zt && Ft(zt) && (zt = null), jt.forEach(Dt), Rt.forEach(Dt) } function Vt(e, t) { e.blockedOn === t && (e.blockedOn = null, Ct || (Ct = !0, a.unstable_scheduleCallback(a.unstable_NormalPriority, Ut))) } function Bt(e) { function t(t) { return Vt(t, e) } if (0 < Tt.length) { Vt(Tt[0], e); for (var n = 1; n < Tt.length; n++) { var r = Tt[n]; r.blockedOn === e && (r.blockedOn = null) } } for (null !== Pt && Vt(Pt, e), null !== Ot && Vt(Ot, e), null !== zt && Vt(zt, e), jt.forEach(t), Rt.forEach(t), n = 0; n < At.length; n++)(r = At[n]).blockedOn === e && (r.blockedOn = null); for (; 0 < At.length && null === (n = At[0]).blockedOn;)It(n), null === n.blockedOn && At.shift() } var $t = _.ReactCurrentBatchConfig, qt = !0; function Wt(e, t, n, r) { var a = gt, l = $t.transition; $t.transition = null; try { gt = 1, Qt(e, t, n, r) } finally { gt = a, $t.transition = l } } function Ht(e, t, n, r) { var a = gt, l = $t.transition; $t.transition = null; try { gt = 4, Qt(e, t, n, r) } finally { gt = a, $t.transition = l } } function Qt(e, t, n, r) { if (qt) { var a = Gt(e, t, n, r); if (null === a) qr(e, t, r, Kt, n), Mt(e, r); else if (function (e, t, n, r, a) { switch (t) { case "focusin": return Pt = Lt(Pt, e, t, n, r, a), !0; case "dragenter": return Ot = Lt(Ot, e, t, n, r, a), !0; case "mouseover": return zt = Lt(zt, e, t, n, r, a), !0; case "pointerover": var l = a.pointerId; return jt.set(l, 
Lt(jt.get(l) || null, e, t, n, r, a)), !0; case "gotpointercapture": return l = a.pointerId, Rt.set(l, Lt(Rt.get(l) || null, e, t, n, r, a)), !0 }return !1 }(a, e, t, n, r)) r.stopPropagation(); else if (Mt(e, r), 4 & t && -1 < Nt.indexOf(e)) { for (; null !== a;) { var l = ga(a); if (null !== l && St(l), null === (l = Gt(e, t, n, r)) && qr(e, t, r, Kt, n), l === a) break; a = l } null !== a && r.stopPropagation() } else qr(e, t, r, null, n) } } var Kt = null; function Gt(e, t, n, r) { if (Kt = null, null !== (e = ba(e = Se(r)))) if (null === (t = Be(e))) e = null; else if (13 === (n = t.tag)) { if (null !== (e = $e(t))) return e; e = null } else if (3 === n) { if (t.stateNode.current.memoizedState.isDehydrated) return 3 === t.tag ? t.stateNode.containerInfo : null; e = null } else t !== e && (e = null); return Kt = e, null } function Yt(e) { switch (e) { case "cancel": case "click": case "close": case "contextmenu": case "copy": case "cut": case "auxclick": case "dblclick": case "dragend": case "dragstart": case "drop": case "focusin": case "focusout": case "input": case "invalid": case "keydown": case "keypress": case "keyup": case "mousedown": case "mouseup": case "paste": case "pause": case "play": case "pointercancel": case "pointerdown": case "pointerup": case "ratechange": case "reset": case "resize": case "seeked": case "submit": case "touchcancel": case "touchend": case "touchstart": case "volumechange": case "change": case "selectionchange": case "textInput": case "compositionstart": case "compositionend": case "compositionupdate": case "beforeblur": case "afterblur": case "beforeinput": case "blur": case "fullscreenchange": case "focus": case "hashchange": case "popstate": case "select": case "selectstart": return 1; case "drag": case "dragenter": case "dragexit": case "dragleave": case "dragover": case "mousemove": case "mouseout": case "mouseover": case "pointermove": case "pointerout": case "pointerover": case "scroll": case "toggle": case 
"touchmove": case "wheel": case "mouseenter": case "mouseleave": case "pointerenter": case "pointerleave": return 4; case "message": switch (Ze()) { case Je: return 1; case et: return 4; case tt: case nt: return 16; case rt: return 536870912; default: return 16 }default: return 16 } } var Xt = null, Zt = null, Jt = null; function en() { if (Jt) return Jt; var e, t, n = Zt, r = n.length, a = "value" in Xt ? Xt.value : Xt.textContent, l = a.length; for (e = 0; e < r && n[e] === a[e]; e++); var o = r - e; for (t = 1; t <= o && n[r - t] === a[l - t]; t++); return Jt = a.slice(e, 1 < t ? 1 - t : void 0) } function tn(e) { var t = e.keyCode; return "charCode" in e ? 0 === (e = e.charCode) && 13 === t && (e = 13) : e = t, 10 === e && (e = 13), 32 <= e || 13 === e ? e : 0 } function nn() { return !0 } function rn() { return !1 } function an(e) { function t(t, n, r, a, l) { for (var o in this._reactName = t, this._targetInst = r, this.type = n, this.nativeEvent = a, this.target = l, this.currentTarget = null, e) e.hasOwnProperty(o) && (t = e[o], this[o] = t ? t(a) : a[o]); return this.isDefaultPrevented = (null != a.defaultPrevented ? a.defaultPrevented : !1 === a.returnValue) ? nn : rn, this.isPropagationStopped = rn, this } return I(t.prototype, { preventDefault: function () { this.defaultPrevented = !0; var e = this.nativeEvent; e && (e.preventDefault ? e.preventDefault() : "unknown" !== typeof e.returnValue && (e.returnValue = !1), this.isDefaultPrevented = nn) }, stopPropagation: function () { var e = this.nativeEvent; e && (e.stopPropagation ? 
e.stopPropagation() : "unknown" !== typeof e.cancelBubble && (e.cancelBubble = !0), this.isPropagationStopped = nn) }, persist: function () { }, isPersistent: nn }), t } var ln, on, un, sn = { eventPhase: 0, bubbles: 0, cancelable: 0, timeStamp: function (e) { return e.timeStamp || Date.now() }, defaultPrevented: 0, isTrusted: 0 }, cn = an(sn), fn = I({}, sn, { view: 0, detail: 0 }), dn = an(fn), pn = I({}, fn, { screenX: 0, screenY: 0, clientX: 0, clientY: 0, pageX: 0, pageY: 0, ctrlKey: 0, shiftKey: 0, altKey: 0, metaKey: 0, getModifierState: xn, button: 0, buttons: 0, relatedTarget: function (e) { return void 0 === e.relatedTarget ? e.fromElement === e.srcElement ? e.toElement : e.fromElement : e.relatedTarget }, movementX: function (e) { return "movementX" in e ? e.movementX : (e !== un && (un && "mousemove" === e.type ? (ln = e.screenX - un.screenX, on = e.screenY - un.screenY) : on = ln = 0, un = e), ln) }, movementY: function (e) { return "movementY" in e ? e.movementY : on } }), mn = an(pn), hn = an(I({}, pn, { dataTransfer: 0 })), yn = an(I({}, fn, { relatedTarget: 0 })), vn = an(I({}, sn, { animationName: 0, elapsedTime: 0, pseudoElement: 0 })), bn = I({}, sn, { clipboardData: function (e) { return "clipboardData" in e ? 
e.clipboardData : window.clipboardData } }), gn = an(bn), _n = an(I({}, sn, { data: 0 })), Sn = { Esc: "Escape", Spacebar: " ", Left: "ArrowLeft", Up: "ArrowUp", Right: "ArrowRight", Down: "ArrowDown", Del: "Delete", Win: "OS", Menu: "ContextMenu", Apps: "ContextMenu", Scroll: "ScrollLock", MozPrintableKey: "Unidentified" }, wn = { 8: "Backspace", 9: "Tab", 12: "Clear", 13: "Enter", 16: "Shift", 17: "Control", 18: "Alt", 19: "Pause", 20: "CapsLock", 27: "Escape", 32: " ", 33: "PageUp", 34: "PageDown", 35: "End", 36: "Home", 37: "ArrowLeft", 38: "ArrowUp", 39: "ArrowRight", 40: "ArrowDown", 45: "Insert", 46: "Delete", 112: "F1", 113: "F2", 114: "F3", 115: "F4", 116: "F5", 117: "F6", 118: "F7", 119: "F8", 120: "F9", 121: "F10", 122: "F11", 123: "F12", 144: "NumLock", 145: "ScrollLock", 224: "Meta" }, kn = { Alt: "altKey", Control: "ctrlKey", Meta: "metaKey", Shift: "shiftKey" }; function En(e) { var t = this.nativeEvent; return t.getModifierState ? t.getModifierState(e) : !!(e = kn[e]) && !!t[e] } function xn() { return En } var Cn = I({}, fn, { key: function (e) { if (e.key) { var t = Sn[e.key] || e.key; if ("Unidentified" !== t) return t } return "keypress" === e.type ? 13 === (e = tn(e)) ? "Enter" : String.fromCharCode(e) : "keydown" === e.type || "keyup" === e.type ? wn[e.keyCode] || "Unidentified" : "" }, code: 0, location: 0, ctrlKey: 0, shiftKey: 0, altKey: 0, metaKey: 0, repeat: 0, locale: 0, getModifierState: xn, charCode: function (e) { return "keypress" === e.type ? tn(e) : 0 }, keyCode: function (e) { return "keydown" === e.type || "keyup" === e.type ? e.keyCode : 0 }, which: function (e) { return "keypress" === e.type ? tn(e) : "keydown" === e.type || "keyup" === e.type ? 
e.keyCode : 0 } }), Tn = an(Cn), Pn = an(I({}, pn, { pointerId: 0, width: 0, height: 0, pressure: 0, tangentialPressure: 0, tiltX: 0, tiltY: 0, twist: 0, pointerType: 0, isPrimary: 0 })), On = an(I({}, fn, { touches: 0, targetTouches: 0, changedTouches: 0, altKey: 0, metaKey: 0, ctrlKey: 0, shiftKey: 0, getModifierState: xn })), zn = an(I({}, sn, { propertyName: 0, elapsedTime: 0, pseudoElement: 0 })), jn = I({}, pn, { deltaX: function (e) { return "deltaX" in e ? e.deltaX : "wheelDeltaX" in e ? -e.wheelDeltaX : 0 }, deltaY: function (e) { return "deltaY" in e ? e.deltaY : "wheelDeltaY" in e ? -e.wheelDeltaY : "wheelDelta" in e ? -e.wheelDelta : 0 }, deltaZ: 0, deltaMode: 0 }), Rn = an(jn), An = [9, 13, 27, 32], Nn = c && "CompositionEvent" in window, Mn = null; c && "documentMode" in document && (Mn = document.documentMode); var Ln = c && "TextEvent" in window && !Mn, In = c && (!Nn || Mn && 8 < Mn && 11 >= Mn), Fn = String.fromCharCode(32), Dn = !1; function Un(e, t) { switch (e) { case "keyup": return -1 !== An.indexOf(t.keyCode); case "keydown": return 229 !== t.keyCode; case "keypress": case "mousedown": case "focusout": return !0; default: return !1 } } function Vn(e) { return "object" === typeof (e = e.detail) && "data" in e ? e.data : null } var Bn = !1; var $n = { color: !0, date: !0, datetime: !0, "datetime-local": !0, email: !0, month: !0, number: !0, password: !0, range: !0, search: !0, tel: !0, text: !0, time: !0, url: !0, week: !0 }; function qn(e) { var t = e && e.nodeName && e.nodeName.toLowerCase(); return "input" === t ? 
!!$n[e.type] : "textarea" === t } function Wn(e, t, n, r) { Ce(r), 0 < (t = Hr(t, "onChange")).length && (n = new cn("onChange", "change", null, n, r), e.push({ event: n, listeners: t })) } var Hn = null, Qn = null; function Kn(e) { Fr(e, 0) } function Gn(e) { if (Q(_a(e))) return e } function Yn(e, t) { if ("change" === e) return t } var Xn = !1; if (c) { var Zn; if (c) { var Jn = "oninput" in document; if (!Jn) { var er = document.createElement("div"); er.setAttribute("oninput", "return;"), Jn = "function" === typeof er.oninput } Zn = Jn } else Zn = !1; Xn = Zn && (!document.documentMode || 9 < document.documentMode) } function tr() { Hn && (Hn.detachEvent("onpropertychange", nr), Qn = Hn = null) } function nr(e) { if ("value" === e.propertyName && Gn(Qn)) { var t = []; Wn(t, Qn, e, Se(e)), je(Kn, t) } } function rr(e, t, n) { "focusin" === e ? (tr(), Qn = n, (Hn = t).attachEvent("onpropertychange", nr)) : "focusout" === e && tr() } function ar(e) { if ("selectionchange" === e || "keyup" === e || "keydown" === e) return Gn(Qn) } function lr(e, t) { if ("click" === e) return Gn(t) } function or(e, t) { if ("input" === e || "change" === e) return Gn(t) } var ir = "function" === typeof Object.is ? 
Object.is : function (e, t) { return e === t && (0 !== e || 1 / e === 1 / t) || e !== e && t !== t }; function ur(e, t) { if (ir(e, t)) return !0; if ("object" !== typeof e || null === e || "object" !== typeof t || null === t) return !1; var n = Object.keys(e), r = Object.keys(t); if (n.length !== r.length) return !1; for (r = 0; r < n.length; r++) { var a = n[r]; if (!f.call(t, a) || !ir(e[a], t[a])) return !1 } return !0 } function sr(e) { for (; e && e.firstChild;)e = e.firstChild; return e } function cr(e, t) { var n, r = sr(e); for (e = 0; r;) { if (3 === r.nodeType) { if (n = e + r.textContent.length, e <= t && n >= t) return { node: r, offset: t - e }; e = n } e: { for (; r;) { if (r.nextSibling) { r = r.nextSibling; break e } r = r.parentNode } r = void 0 } r = sr(r) } } function fr(e, t) { return !(!e || !t) && (e === t || (!e || 3 !== e.nodeType) && (t && 3 === t.nodeType ? fr(e, t.parentNode) : "contains" in e ? e.contains(t) : !!e.compareDocumentPosition && !!(16 & e.compareDocumentPosition(t)))) } function dr() { for (var e = window, t = K(); t instanceof e.HTMLIFrameElement;) { try { var n = "string" === typeof t.contentWindow.location.href } catch (r) { n = !1 } if (!n) break; t = K((e = t.contentWindow).document) } return t } function pr(e) { var t = e && e.nodeName && e.nodeName.toLowerCase(); return t && ("input" === t && ("text" === e.type || "search" === e.type || "tel" === e.type || "url" === e.type || "password" === e.type) || "textarea" === t || "true" === e.contentEditable) } function mr(e) { var t = dr(), n = e.focusedElem, r = e.selectionRange; if (t !== n && n && n.ownerDocument && fr(n.ownerDocument.documentElement, n)) { if (null !== r && pr(n)) if (t = r.start, void 0 === (e = r.end) && (e = t), "selectionStart" in n) n.selectionStart = t, n.selectionEnd = Math.min(e, n.value.length); else if ((e = (t = n.ownerDocument || document) && t.defaultView || window).getSelection) { e = e.getSelection(); var a = n.textContent.length, l = 
Math.min(r.start, a); r = void 0 === r.end ? l : Math.min(r.end, a), !e.extend && l > r && (a = r, r = l, l = a), a = cr(n, l); var o = cr(n, r); a && o && (1 !== e.rangeCount || e.anchorNode !== a.node || e.anchorOffset !== a.offset || e.focusNode !== o.node || e.focusOffset !== o.offset) && ((t = t.createRange()).setStart(a.node, a.offset), e.removeAllRanges(), l > r ? (e.addRange(t), e.extend(o.node, o.offset)) : (t.setEnd(o.node, o.offset), e.addRange(t))) } for (t = [], e = n; e = e.parentNode;)1 === e.nodeType && t.push({ element: e, left: e.scrollLeft, top: e.scrollTop }); for ("function" === typeof n.focus && n.focus(), n = 0; n < t.length; n++)(e = t[n]).element.scrollLeft = e.left, e.element.scrollTop = e.top } } var hr = c && "documentMode" in document && 11 >= document.documentMode, yr = null, vr = null, br = null, gr = !1; function _r(e, t, n) { var r = n.window === n ? n.document : 9 === n.nodeType ? n : n.ownerDocument; gr || null == yr || yr !== K(r) || ("selectionStart" in (r = yr) && pr(r) ? 
r = { start: r.selectionStart, end: r.selectionEnd } : r = { anchorNode: (r = (r.ownerDocument && r.ownerDocument.defaultView || window).getSelection()).anchorNode, anchorOffset: r.anchorOffset, focusNode: r.focusNode, focusOffset: r.focusOffset }, br && ur(br, r) || (br = r, 0 < (r = Hr(vr, "onSelect")).length && (t = new cn("onSelect", "select", null, t, n), e.push({ event: t, listeners: r }), t.target = yr))) } function Sr(e, t) { var n = {}; return n[e.toLowerCase()] = t.toLowerCase(), n["Webkit" + e] = "webkit" + t, n["Moz" + e] = "moz" + t, n } var wr = { animationend: Sr("Animation", "AnimationEnd"), animationiteration: Sr("Animation", "AnimationIteration"), animationstart: Sr("Animation", "AnimationStart"), transitionend: Sr("Transition", "TransitionEnd") }, kr = {}, Er = {}; function xr(e) { if (kr[e]) return kr[e]; if (!wr[e]) return e; var t, n = wr[e]; for (t in n) if (n.hasOwnProperty(t) && t in Er) return kr[e] = n[t]; return e } c && (Er = document.createElement("div").style, "AnimationEvent" in window || (delete wr.animationend.animation, delete wr.animationiteration.animation, delete wr.animationstart.animation), "TransitionEvent" in window || delete wr.transitionend.transition); var Cr = xr("animationend"), Tr = xr("animationiteration"), Pr = xr("animationstart"), Or = xr("transitionend"), zr = new Map, jr = "abort auxClick cancel canPlay canPlayThrough click close contextMenu copy cut drag dragEnd dragEnter dragExit dragLeave dragOver dragStart drop durationChange emptied encrypted ended error gotPointerCapture input invalid keyDown keyPress keyUp load loadedData loadedMetadata loadStart lostPointerCapture mouseDown mouseMove mouseOut mouseOver mouseUp paste pause play playing pointerCancel pointerDown pointerMove pointerOut pointerOver pointerUp progress rateChange reset resize seeked seeking stalled submit suspend timeUpdate touchCancel touchEnd touchStart volumeChange scroll toggle touchMove waiting wheel".split(" "); function Rr(e, t) { 
zr.set(e, t), u(t, [e]) } for (var Ar = 0; Ar < jr.length; Ar++) { var Nr = jr[Ar]; Rr(Nr.toLowerCase(), "on" + (Nr[0].toUpperCase() + Nr.slice(1))) } Rr(Cr, "onAnimationEnd"), Rr(Tr, "onAnimationIteration"), Rr(Pr, "onAnimationStart"), Rr("dblclick", "onDoubleClick"), Rr("focusin", "onFocus"), Rr("focusout", "onBlur"), Rr(Or, "onTransitionEnd"), s("onMouseEnter", ["mouseout", "mouseover"]), s("onMouseLeave", ["mouseout", "mouseover"]), s("onPointerEnter", ["pointerout", "pointerover"]), s("onPointerLeave", ["pointerout", "pointerover"]), u("onChange", "change click focusin focusout input keydown keyup selectionchange".split(" ")), u("onSelect", "focusout contextmenu dragend focusin keydown keyup mousedown mouseup selectionchange".split(" ")), u("onBeforeInput", ["compositionend", "keypress", "textInput", "paste"]), u("onCompositionEnd", "compositionend focusout keydown keypress keyup mousedown".split(" ")), u("onCompositionStart", "compositionstart focusout keydown keypress keyup mousedown".split(" ")), u("onCompositionUpdate", "compositionupdate focusout keydown keypress keyup mousedown".split(" ")); var Mr = "abort canplay canplaythrough durationchange emptied encrypted ended error loadeddata loadedmetadata loadstart pause play playing progress ratechange resize seeked seeking stalled suspend timeupdate volumechange waiting".split(" "), Lr = new Set("cancel close invalid load scroll toggle".split(" ").concat(Mr)); function Ir(e, t, n) { var r = e.type || "unknown-event"; e.currentTarget = n, function (e, t, n, r, a, o, i, u, s) { if (Ve.apply(this, arguments), Le) { if (!Le) throw Error(l(198)); var c = Ie; Le = !1, Ie = null, Fe || (Fe = !0, De = c) } }(r, t, void 0, e), e.currentTarget = null } function Fr(e, t) { t = 0 !== (4 & t); for (var n = 0; n < e.length; n++) { var r = e[n], a = r.event; r = r.listeners; e: { var l = void 0; if (t) for (var o = r.length - 1; 0 <= o; o--) { var i = r[o], u = i.instance, s = i.currentTarget; if (i = i.listener, u !== l 
&& a.isPropagationStopped()) break e; Ir(a, i, s), l = u } else for (o = 0; o < r.length; o++) { if (u = (i = r[o]).instance, s = i.currentTarget, i = i.listener, u !== l && a.isPropagationStopped()) break e; Ir(a, i, s), l = u } } } if (Fe) throw e = De, Fe = !1, De = null, e } function Dr(e, t) { var n = t[ha]; void 0 === n && (n = t[ha] = new Set); var r = e + "__bubble"; n.has(r) || ($r(t, e, 2, !1), n.add(r)) } function Ur(e, t, n) { var r = 0; t && (r |= 4), $r(n, e, r, t) } var Vr = "_reactListening" + Math.random().toString(36).slice(2); function Br(e) { if (!e[Vr]) { e[Vr] = !0, o.forEach((function (t) { "selectionchange" !== t && (Lr.has(t) || Ur(t, !1, e), Ur(t, !0, e)) })); var t = 9 === e.nodeType ? e : e.ownerDocument; null === t || t[Vr] || (t[Vr] = !0, Ur("selectionchange", !1, t)) } } function $r(e, t, n, r) { switch (Yt(t)) { case 1: var a = Wt; break; case 4: a = Ht; break; default: a = Qt }n = a.bind(null, t, n, e), a = void 0, !Ae || "touchstart" !== t && "touchmove" !== t && "wheel" !== t || (a = !0), r ? void 0 !== a ? e.addEventListener(t, n, { capture: !0, passive: a }) : e.addEventListener(t, n, !0) : void 0 !== a ? 
e.addEventListener(t, n, { passive: a }) : e.addEventListener(t, n, !1) } function qr(e, t, n, r, a) { var l = r; if (0 === (1 & t) && 0 === (2 & t) && null !== r) e: for (; ;) { if (null === r) return; var o = r.tag; if (3 === o || 4 === o) { var i = r.stateNode.containerInfo; if (i === a || 8 === i.nodeType && i.parentNode === a) break; if (4 === o) for (o = r.return; null !== o;) { var u = o.tag; if ((3 === u || 4 === u) && ((u = o.stateNode.containerInfo) === a || 8 === u.nodeType && u.parentNode === a)) return; o = o.return } for (; null !== i;) { if (null === (o = ba(i))) return; if (5 === (u = o.tag) || 6 === u) { r = l = o; continue e } i = i.parentNode } } r = r.return } je((function () { var r = l, a = Se(n), o = []; e: { var i = zr.get(e); if (void 0 !== i) { var u = cn, s = e; switch (e) { case "keypress": if (0 === tn(n)) break e; case "keydown": case "keyup": u = Tn; break; case "focusin": s = "focus", u = yn; break; case "focusout": s = "blur", u = yn; break; case "beforeblur": case "afterblur": u = yn; break; case "click": if (2 === n.button) break e; case "auxclick": case "dblclick": case "mousedown": case "mousemove": case "mouseup": case "mouseout": case "mouseover": case "contextmenu": u = mn; break; case "drag": case "dragend": case "dragenter": case "dragexit": case "dragleave": case "dragover": case "dragstart": case "drop": u = hn; break; case "touchcancel": case "touchend": case "touchmove": case "touchstart": u = On; break; case Cr: case Tr: case Pr: u = vn; break; case Or: u = zn; break; case "scroll": u = dn; break; case "wheel": u = Rn; break; case "copy": case "cut": case "paste": u = gn; break; case "gotpointercapture": case "lostpointercapture": case "pointercancel": case "pointerdown": case "pointermove": case "pointerout": case "pointerover": case "pointerup": u = Pn }var c = 0 !== (4 & t), f = !c && "scroll" === e, d = c ? null !== i ? 
i + "Capture" : null : i; c = []; for (var p, m = r; null !== m;) { var h = (p = m).stateNode; if (5 === p.tag && null !== h && (p = h, null !== d && (null != (h = Re(m, d)) && c.push(Wr(m, h, p)))), f) break; m = m.return } 0 < c.length && (i = new u(i, s, null, n, a), o.push({ event: i, listeners: c })) } } if (0 === (7 & t)) { if (u = "mouseout" === e || "pointerout" === e, (!(i = "mouseover" === e || "pointerover" === e) || n === _e || !(s = n.relatedTarget || n.fromElement) || !ba(s) && !s[ma]) && (u || i) && (i = a.window === a ? a : (i = a.ownerDocument) ? i.defaultView || i.parentWindow : window, u ? (u = r, null !== (s = (s = n.relatedTarget || n.toElement) ? ba(s) : null) && (s !== (f = Be(s)) || 5 !== s.tag && 6 !== s.tag) && (s = null)) : (u = null, s = r), u !== s)) { if (c = mn, h = "onMouseLeave", d = "onMouseEnter", m = "mouse", "pointerout" !== e && "pointerover" !== e || (c = Pn, h = "onPointerLeave", d = "onPointerEnter", m = "pointer"), f = null == u ? i : _a(u), p = null == s ? i : _a(s), (i = new c(h, m + "leave", u, n, a)).target = f, i.relatedTarget = p, h = null, ba(a) === r && ((c = new c(d, m + "enter", s, n, a)).target = p, c.relatedTarget = f, h = c), f = h, u && s) e: { for (d = s, m = 0, p = c = u; p; p = Qr(p))m++; for (p = 0, h = d; h; h = Qr(h))p++; for (; 0 < m - p;)c = Qr(c), m--; for (; 0 < p - m;)d = Qr(d), p--; for (; m--;) { if (c === d || null !== d && c === d.alternate) break e; c = Qr(c), d = Qr(d) } c = null } else c = null; null !== u && Kr(o, i, u, c, !1), null !== s && null !== f && Kr(o, f, s, c, !0) } if ("select" === (u = (i = r ? _a(r) : window).nodeName && i.nodeName.toLowerCase()) || "input" === u && "file" === i.type) var y = Yn; else if (qn(i)) if (Xn) y = or; else { y = ar; var v = rr } else (u = i.nodeName) && "input" === u.toLowerCase() && ("checkbox" === i.type || "radio" === i.type) && (y = lr); switch (y && (y = y(e, r)) ? 
Wn(o, y, n, a) : (v && v(e, i, r), "focusout" === e && (v = i._wrapperState) && v.controlled && "number" === i.type && ee(i, "number", i.value)), v = r ? _a(r) : window, e) { case "focusin": (qn(v) || "true" === v.contentEditable) && (yr = v, vr = r, br = null); break; case "focusout": br = vr = yr = null; break; case "mousedown": gr = !0; break; case "contextmenu": case "mouseup": case "dragend": gr = !1, _r(o, n, a); break; case "selectionchange": if (hr) break; case "keydown": case "keyup": _r(o, n, a) }var b; if (Nn) e: { switch (e) { case "compositionstart": var g = "onCompositionStart"; break e; case "compositionend": g = "onCompositionEnd"; break e; case "compositionupdate": g = "onCompositionUpdate"; break e }g = void 0 } else Bn ? Un(e, n) && (g = "onCompositionEnd") : "keydown" === e && 229 === n.keyCode && (g = "onCompositionStart"); g && (In && "ko" !== n.locale && (Bn || "onCompositionStart" !== g ? "onCompositionEnd" === g && Bn && (b = en()) : (Zt = "value" in (Xt = a) ? Xt.value : Xt.textContent, Bn = !0)), 0 < (v = Hr(r, g)).length && (g = new _n(g, e, null, n, a), o.push({ event: g, listeners: v }), b ? g.data = b : null !== (b = Vn(n)) && (g.data = b))), (b = Ln ? function (e, t) { switch (e) { case "compositionend": return Vn(t); case "keypress": return 32 !== t.which ? null : (Dn = !0, Fn); case "textInput": return (e = t.data) === Fn && Dn ? null : e; default: return null } }(e, n) : function (e, t) { if (Bn) return "compositionend" === e || !Nn && Un(e, t) ? (e = en(), Jt = Zt = Xt = null, Bn = !1, e) : null; switch (e) { case "paste": default: return null; case "keypress": if (!(t.ctrlKey || t.altKey || t.metaKey) || t.ctrlKey && t.altKey) { if (t.char && 1 < t.char.length) return t.char; if (t.which) return String.fromCharCode(t.which) } return null; case "compositionend": return In && "ko" !== t.locale ? 
null : t.data } }(e, n)) && (0 < (r = Hr(r, "onBeforeInput")).length && (a = new _n("onBeforeInput", "beforeinput", null, n, a), o.push({ event: a, listeners: r }), a.data = b)) } Fr(o, t) })) } function Wr(e, t, n) { return { instance: e, listener: t, currentTarget: n } } function Hr(e, t) { for (var n = t + "Capture", r = []; null !== e;) { var a = e, l = a.stateNode; 5 === a.tag && null !== l && (a = l, null != (l = Re(e, n)) && r.unshift(Wr(e, l, a)), null != (l = Re(e, t)) && r.push(Wr(e, l, a))), e = e.return } return r } function Qr(e) { if (null === e) return null; do { e = e.return } while (e && 5 !== e.tag); return e || null } function Kr(e, t, n, r, a) { for (var l = t._reactName, o = []; null !== n && n !== r;) { var i = n, u = i.alternate, s = i.stateNode; if (null !== u && u === r) break; 5 === i.tag && null !== s && (i = s, a ? null != (u = Re(n, l)) && o.unshift(Wr(n, u, i)) : a || null != (u = Re(n, l)) && o.push(Wr(n, u, i))), n = n.return } 0 !== o.length && e.push({ event: t, listeners: o }) } var Gr = /\r\n?/g, Yr = /\u0000|\uFFFD/g; function Xr(e) { return ("string" === typeof e ? e : "" + e).replace(Gr, "\n").replace(Yr, "") } function Zr(e, t, n) { if (t = Xr(t), Xr(e) !== t && n) throw Error(l(425)) } function Jr() { } var ea = null, ta = null; function na(e, t) { return "textarea" === e || "noscript" === e || "string" === typeof t.children || "number" === typeof t.children || "object" === typeof t.dangerouslySetInnerHTML && null !== t.dangerouslySetInnerHTML && null != t.dangerouslySetInnerHTML.__html } var ra = "function" === typeof setTimeout ? setTimeout : void 0, aa = "function" === typeof clearTimeout ? clearTimeout : void 0, la = "function" === typeof Promise ? Promise : void 0, oa = "function" === typeof queueMicrotask ? queueMicrotask : "undefined" !== typeof la ? 
function (e) { return la.resolve(null).then(e).catch(ia) } : ra; function ia(e) { setTimeout((function () { throw e })) } function ua(e, t) { var n = t, r = 0; do { var a = n.nextSibling; if (e.removeChild(n), a && 8 === a.nodeType) if ("/$" === (n = a.data)) { if (0 === r) return e.removeChild(a), void Bt(t); r-- } else "$" !== n && "$?" !== n && "$!" !== n || r++; n = a } while (n); Bt(t) } function sa(e) { for (; null != e; e = e.nextSibling) { var t = e.nodeType; if (1 === t || 3 === t) break; if (8 === t) { if ("$" === (t = e.data) || "$!" === t || "$?" === t) break; if ("/$" === t) return null } } return e } function ca(e) { e = e.previousSibling; for (var t = 0; e;) { if (8 === e.nodeType) { var n = e.data; if ("$" === n || "$!" === n || "$?" === n) { if (0 === t) return e; t-- } else "/$" === n && t++ } e = e.previousSibling } return null } var fa = Math.random().toString(36).slice(2), da = "__reactFiber$" + fa, pa = "__reactProps$" + fa, ma = "__reactContainer$" + fa, ha = "__reactEvents$" + fa, ya = "__reactListeners$" + fa, va = "__reactHandles$" + fa; function ba(e) { var t = e[da]; if (t) return t; for (var n = e.parentNode; n;) { if (t = n[ma] || n[da]) { if (n = t.alternate, null !== t.child || null !== n && null !== n.child) for (e = ca(e); null !== e;) { if (n = e[da]) return n; e = ca(e) } return t } n = (e = n).parentNode } return null } function ga(e) { return !(e = e[da] || e[ma]) || 5 !== e.tag && 6 !== e.tag && 13 !== e.tag && 3 !== e.tag ? 
null : e } function _a(e) { if (5 === e.tag || 6 === e.tag) return e.stateNode; throw Error(l(33)) } function Sa(e) { return e[pa] || null } var wa = [], ka = -1; function Ea(e) { return { current: e } } function xa(e) { 0 > ka || (e.current = wa[ka], wa[ka] = null, ka--) } function Ca(e, t) { ka++, wa[ka] = e.current, e.current = t } var Ta = {}, Pa = Ea(Ta), Oa = Ea(!1), za = Ta; function ja(e, t) { var n = e.type.contextTypes; if (!n) return Ta; var r = e.stateNode; if (r && r.__reactInternalMemoizedUnmaskedChildContext === t) return r.__reactInternalMemoizedMaskedChildContext; var a, l = {}; for (a in n) l[a] = t[a]; return r && ((e = e.stateNode).__reactInternalMemoizedUnmaskedChildContext = t, e.__reactInternalMemoizedMaskedChildContext = l), l } function Ra(e) { return null !== (e = e.childContextTypes) && void 0 !== e } function Aa() { xa(Oa), xa(Pa) } function Na(e, t, n) { if (Pa.current !== Ta) throw Error(l(168)); Ca(Pa, t), Ca(Oa, n) } function Ma(e, t, n) { var r = e.stateNode; if (t = t.childContextTypes, "function" !== typeof r.getChildContext) return n; for (var a in r = r.getChildContext()) if (!(a in t)) throw Error(l(108, $(e) || "Unknown", a)); return I({}, n, r) } function La(e) { return e = (e = e.stateNode) && e.__reactInternalMemoizedMergedChildContext || Ta, za = Pa.current, Ca(Pa, e), Ca(Oa, Oa.current), !0 } function Ia(e, t, n) { var r = e.stateNode; if (!r) throw Error(l(169)); n ? (e = Ma(e, t, za), r.__reactInternalMemoizedMergedChildContext = e, xa(Oa), xa(Pa), Ca(Pa, e)) : xa(Oa), Ca(Oa, n) } var Fa = null, Da = !1, Ua = !1; function Va(e) { null === Fa ? 
Fa = [e] : Fa.push(e) } function Ba() { if (!Ua && null !== Fa) { Ua = !0; var e = 0, t = gt; try { var n = Fa; for (gt = 1; e < n.length; e++) { var r = n[e]; do { r = r(!0) } while (null !== r) } Fa = null, Da = !1 } catch (a) { throw null !== Fa && (Fa = Fa.slice(e + 1)), Qe(Je, Ba), a } finally { gt = t, Ua = !1 } } return null } var $a = [], qa = 0, Wa = null, Ha = 0, Qa = [], Ka = 0, Ga = null, Ya = 1, Xa = ""; function Za(e, t) { $a[qa++] = Ha, $a[qa++] = Wa, Wa = e, Ha = t } function Ja(e, t, n) { Qa[Ka++] = Ya, Qa[Ka++] = Xa, Qa[Ka++] = Ga, Ga = e; var r = Ya; e = Xa; var a = 32 - ot(r) - 1; r &= ~(1 << a), n += 1; var l = 32 - ot(t) + a; if (30 < l) { var o = a - a % 5; l = (r & (1 << o) - 1).toString(32), r >>= o, a -= o, Ya = 1 << 32 - ot(t) + a | n << a | r, Xa = l + e } else Ya = 1 << l | n << a | r, Xa = e } function el(e) { null !== e.return && (Za(e, 1), Ja(e, 1, 0)) } function tl(e) { for (; e === Wa;)Wa = $a[--qa], $a[qa] = null, Ha = $a[--qa], $a[qa] = null; for (; e === Ga;)Ga = Qa[--Ka], Qa[Ka] = null, Xa = Qa[--Ka], Qa[Ka] = null, Ya = Qa[--Ka], Qa[Ka] = null } var nl = null, rl = null, al = !1, ll = null; function ol(e, t) { var n = Rs(5, null, null, 0); n.elementType = "DELETED", n.stateNode = t, n.return = e, null === (t = e.deletions) ? (e.deletions = [n], e.flags |= 16) : t.push(n) } function il(e, t) { switch (e.tag) { case 5: var n = e.type; return null !== (t = 1 !== t.nodeType || n.toLowerCase() !== t.nodeName.toLowerCase() ? null : t) && (e.stateNode = t, nl = e, rl = sa(t.firstChild), !0); case 6: return null !== (t = "" === e.pendingProps || 3 !== t.nodeType ? null : t) && (e.stateNode = t, nl = e, rl = null, !0); case 13: return null !== (t = 8 !== t.nodeType ? null : t) && (n = null !== Ga ? 
{ id: Ya, overflow: Xa } : null, e.memoizedState = { dehydrated: t, treeContext: n, retryLane: 1073741824 }, (n = Rs(18, null, null, 0)).stateNode = t, n.return = e, e.child = n, nl = e, rl = null, !0); default: return !1 } } function ul(e) { return 0 !== (1 & e.mode) && 0 === (128 & e.flags) } function sl(e) { if (al) { var t = rl; if (t) { var n = t; if (!il(e, t)) { if (ul(e)) throw Error(l(418)); t = sa(n.nextSibling); var r = nl; t && il(e, t) ? ol(r, n) : (e.flags = -4097 & e.flags | 2, al = !1, nl = e) } } else { if (ul(e)) throw Error(l(418)); e.flags = -4097 & e.flags | 2, al = !1, nl = e } } } function cl(e) { for (e = e.return; null !== e && 5 !== e.tag && 3 !== e.tag && 13 !== e.tag;)e = e.return; nl = e } function fl(e) { if (e !== nl) return !1; if (!al) return cl(e), al = !0, !1; var t; if ((t = 3 !== e.tag) && !(t = 5 !== e.tag) && (t = "head" !== (t = e.type) && "body" !== t && !na(e.type, e.memoizedProps)), t && (t = rl)) { if (ul(e)) throw dl(), Error(l(418)); for (; t;)ol(e, t), t = sa(t.nextSibling) } if (cl(e), 13 === e.tag) { if (!(e = null !== (e = e.memoizedState) ? e.dehydrated : null)) throw Error(l(317)); e: { for (e = e.nextSibling, t = 0; e;) { if (8 === e.nodeType) { var n = e.data; if ("/$" === n) { if (0 === t) { rl = sa(e.nextSibling); break e } t-- } else "$" !== n && "$!" !== n && "$?" !== n || t++ } e = e.nextSibling } rl = null } } else rl = nl ? sa(e.stateNode.nextSibling) : null; return !0 } function dl() { for (var e = rl; e;)e = sa(e.nextSibling) } function pl() { rl = nl = null, al = !1 } function ml(e) { null === ll ? 
ll = [e] : ll.push(e) } var hl = _.ReactCurrentBatchConfig; function yl(e, t) { if (e && e.defaultProps) { for (var n in t = I({}, t), e = e.defaultProps) void 0 === t[n] && (t[n] = e[n]); return t } return t } var vl = Ea(null), bl = null, gl = null, _l = null; function Sl() { _l = gl = bl = null } function wl(e) { var t = vl.current; xa(vl), e._currentValue = t } function kl(e, t, n) { for (; null !== e;) { var r = e.alternate; if ((e.childLanes & t) !== t ? (e.childLanes |= t, null !== r && (r.childLanes |= t)) : null !== r && (r.childLanes & t) !== t && (r.childLanes |= t), e === n) break; e = e.return } } function El(e, t) { bl = e, _l = gl = null, null !== (e = e.dependencies) && null !== e.firstContext && (0 !== (e.lanes & t) && (_i = !0), e.firstContext = null) } function xl(e) { var t = e._currentValue; if (_l !== e) if (e = { context: e, memoizedValue: t, next: null }, null === gl) { if (null === bl) throw Error(l(308)); gl = e, bl.dependencies = { lanes: 0, firstContext: e } } else gl = gl.next = e; return t } var Cl = null; function Tl(e) { null === Cl ? Cl = [e] : Cl.push(e) } function Pl(e, t, n, r) { var a = t.interleaved; return null === a ? (n.next = n, Tl(t)) : (n.next = a.next, a.next = n), t.interleaved = n, Ol(e, r) } function Ol(e, t) { e.lanes |= t; var n = e.alternate; for (null !== n && (n.lanes |= t), n = e, e = e.return; null !== e;)e.childLanes |= t, null !== (n = e.alternate) && (n.childLanes |= t), n = e, e = e.return; return 3 === n.tag ? 
n.stateNode : null } var zl = !1; function jl(e) { e.updateQueue = { baseState: e.memoizedState, firstBaseUpdate: null, lastBaseUpdate: null, shared: { pending: null, interleaved: null, lanes: 0 }, effects: null } } function Rl(e, t) { e = e.updateQueue, t.updateQueue === e && (t.updateQueue = { baseState: e.baseState, firstBaseUpdate: e.firstBaseUpdate, lastBaseUpdate: e.lastBaseUpdate, shared: e.shared, effects: e.effects }) } function Al(e, t) { return { eventTime: e, lane: t, tag: 0, payload: null, callback: null, next: null } } function Nl(e, t, n) { var r = e.updateQueue; if (null === r) return null; if (r = r.shared, 0 !== (2 & Ou)) { var a = r.pending; return null === a ? t.next = t : (t.next = a.next, a.next = t), r.pending = t, Ol(e, n) } return null === (a = r.interleaved) ? (t.next = t, Tl(r)) : (t.next = a.next, a.next = t), r.interleaved = t, Ol(e, n) } function Ml(e, t, n) { if (null !== (t = t.updateQueue) && (t = t.shared, 0 !== (4194240 & n))) { var r = t.lanes; n |= r &= e.pendingLanes, t.lanes = n, bt(e, n) } } function Ll(e, t) { var n = e.updateQueue, r = e.alternate; if (null !== r && n === (r = r.updateQueue)) { var a = null, l = null; if (null !== (n = n.firstBaseUpdate)) { do { var o = { eventTime: n.eventTime, lane: n.lane, tag: n.tag, payload: n.payload, callback: n.callback, next: null }; null === l ? a = l = o : l = l.next = o, n = n.next } while (null !== n); null === l ? a = l = t : l = l.next = t } else a = l = t; return n = { baseState: r.baseState, firstBaseUpdate: a, lastBaseUpdate: l, shared: r.shared, effects: r.effects }, void (e.updateQueue = n) } null === (e = n.lastBaseUpdate) ? n.firstBaseUpdate = t : e.next = t, n.lastBaseUpdate = t } function Il(e, t, n, r) { var a = e.updateQueue; zl = !1; var l = a.firstBaseUpdate, o = a.lastBaseUpdate, i = a.shared.pending; if (null !== i) { a.shared.pending = null; var u = i, s = u.next; u.next = null, null === o ? 
l = s : o.next = s, o = u; var c = e.alternate; null !== c && ((i = (c = c.updateQueue).lastBaseUpdate) !== o && (null === i ? c.firstBaseUpdate = s : i.next = s, c.lastBaseUpdate = u)) } if (null !== l) { var f = a.baseState; for (o = 0, c = s = u = null, i = l; ;) { var d = i.lane, p = i.eventTime; if ((r & d) === d) { null !== c && (c = c.next = { eventTime: p, lane: 0, tag: i.tag, payload: i.payload, callback: i.callback, next: null }); e: { var m = e, h = i; switch (d = t, p = n, h.tag) { case 1: if ("function" === typeof (m = h.payload)) { f = m.call(p, f, d); break e } f = m; break e; case 3: m.flags = -65537 & m.flags | 128; case 0: if (null === (d = "function" === typeof (m = h.payload) ? m.call(p, f, d) : m) || void 0 === d) break e; f = I({}, f, d); break e; case 2: zl = !0 } } null !== i.callback && 0 !== i.lane && (e.flags |= 64, null === (d = a.effects) ? a.effects = [i] : d.push(i)) } else p = { eventTime: p, lane: d, tag: i.tag, payload: i.payload, callback: i.callback, next: null }, null === c ? (s = c = p, u = f) : c = c.next = p, o |= d; if (null === (i = i.next)) { if (null === (i = a.shared.pending)) break; i = (d = i).next, d.next = null, a.lastBaseUpdate = d, a.shared.pending = null } } if (null === c && (u = f), a.baseState = u, a.firstBaseUpdate = s, a.lastBaseUpdate = c, null !== (t = a.shared.interleaved)) { a = t; do { o |= a.lane, a = a.next } while (a !== t) } else null === l && (a.shared.lanes = 0); Iu |= o, e.lanes = o, e.memoizedState = f } } function Fl(e, t, n) { if (e = t.effects, t.effects = null, null !== e) for (t = 0; t < e.length; t++) { var r = e[t], a = r.callback; if (null !== a) { if (r.callback = null, r = n, "function" !== typeof a) throw Error(l(191, a)); a.call(r) } } } var Dl = (new r.Component).refs; function Ul(e, t, n, r) { n = null === (n = n(r, t = e.memoizedState)) || void 0 === n ? 
t : I({}, t, n), e.memoizedState = n, 0 === e.lanes && (e.updateQueue.baseState = n) } var Vl = { isMounted: function (e) { return !!(e = e._reactInternals) && Be(e) === e }, enqueueSetState: function (e, t, n) { e = e._reactInternals; var r = ts(), a = ns(e), l = Al(r, a); l.payload = t, void 0 !== n && null !== n && (l.callback = n), null !== (t = Nl(e, l, a)) && (rs(t, e, a, r), Ml(t, e, a)) }, enqueueReplaceState: function (e, t, n) { e = e._reactInternals; var r = ts(), a = ns(e), l = Al(r, a); l.tag = 1, l.payload = t, void 0 !== n && null !== n && (l.callback = n), null !== (t = Nl(e, l, a)) && (rs(t, e, a, r), Ml(t, e, a)) }, enqueueForceUpdate: function (e, t) { e = e._reactInternals; var n = ts(), r = ns(e), a = Al(n, r); a.tag = 2, void 0 !== t && null !== t && (a.callback = t), null !== (t = Nl(e, a, r)) && (rs(t, e, r, n), Ml(t, e, r)) } }; function Bl(e, t, n, r, a, l, o) { return "function" === typeof (e = e.stateNode).shouldComponentUpdate ? e.shouldComponentUpdate(r, l, o) : !t.prototype || !t.prototype.isPureReactComponent || (!ur(n, r) || !ur(a, l)) } function $l(e, t, n) { var r = !1, a = Ta, l = t.contextType; return "object" === typeof l && null !== l ? l = xl(l) : (a = Ra(t) ? za : Pa.current, l = (r = null !== (r = t.contextTypes) && void 0 !== r) ? ja(e, a) : Ta), t = new t(n, l), e.memoizedState = null !== t.state && void 0 !== t.state ? 
t.state : null, t.updater = Vl, e.stateNode = t, t._reactInternals = e, r && ((e = e.stateNode).__reactInternalMemoizedUnmaskedChildContext = a, e.__reactInternalMemoizedMaskedChildContext = l), t } function ql(e, t, n, r) { e = t.state, "function" === typeof t.componentWillReceiveProps && t.componentWillReceiveProps(n, r), "function" === typeof t.UNSAFE_componentWillReceiveProps && t.UNSAFE_componentWillReceiveProps(n, r), t.state !== e && Vl.enqueueReplaceState(t, t.state, null) } function Wl(e, t, n, r) { var a = e.stateNode; a.props = n, a.state = e.memoizedState, a.refs = Dl, jl(e); var l = t.contextType; "object" === typeof l && null !== l ? a.context = xl(l) : (l = Ra(t) ? za : Pa.current, a.context = ja(e, l)), a.state = e.memoizedState, "function" === typeof (l = t.getDerivedStateFromProps) && (Ul(e, t, l, n), a.state = e.memoizedState), "function" === typeof t.getDerivedStateFromProps || "function" === typeof a.getSnapshotBeforeUpdate || "function" !== typeof a.UNSAFE_componentWillMount && "function" !== typeof a.componentWillMount || (t = a.state, "function" === typeof a.componentWillMount && a.componentWillMount(), "function" === typeof a.UNSAFE_componentWillMount && a.UNSAFE_componentWillMount(), t !== a.state && Vl.enqueueReplaceState(a, a.state, null), Il(e, n, a, r), a.state = e.memoizedState), "function" === typeof a.componentDidMount && (e.flags |= 4194308) } function Hl(e, t, n) { if (null !== (e = n.ref) && "function" !== typeof e && "object" !== typeof e) { if (n._owner) { if (n = n._owner) { if (1 !== n.tag) throw Error(l(309)); var r = n.stateNode } if (!r) throw Error(l(147, e)); var a = r, o = "" + e; return null !== t && null !== t.ref && "function" === typeof t.ref && t.ref._stringRef === o ? t.ref : (t = function (e) { var t = a.refs; t === Dl && (t = a.refs = {}), null === e ? 
delete t[o] : t[o] = e }, t._stringRef = o, t) } if ("string" !== typeof e) throw Error(l(284)); if (!n._owner) throw Error(l(290, e)) } return e } function Ql(e, t) { throw e = Object.prototype.toString.call(t), Error(l(31, "[object Object]" === e ? "object with keys {" + Object.keys(t).join(", ") + "}" : e)) } function Kl(e) { return (0, e._init)(e._payload) } function Gl(e) { function t(t, n) { if (e) { var r = t.deletions; null === r ? (t.deletions = [n], t.flags |= 16) : r.push(n) } } function n(n, r) { if (!e) return null; for (; null !== r;)t(n, r), r = r.sibling; return null } function r(e, t) { for (e = new Map; null !== t;)null !== t.key ? e.set(t.key, t) : e.set(t.index, t), t = t.sibling; return e } function a(e, t) { return (e = Ns(e, t)).index = 0, e.sibling = null, e } function o(t, n, r) { return t.index = r, e ? null !== (r = t.alternate) ? (r = r.index) < n ? (t.flags |= 2, n) : r : (t.flags |= 2, n) : (t.flags |= 1048576, n) } function i(t) { return e && null === t.alternate && (t.flags |= 2), t } function u(e, t, n, r) { return null === t || 6 !== t.tag ? ((t = Fs(n, e.mode, r)).return = e, t) : ((t = a(t, n)).return = e, t) } function s(e, t, n, r) { var l = n.type; return l === k ? f(e, t, n.props.children, r, n.key) : null !== t && (t.elementType === l || "object" === typeof l && null !== l && l.$$typeof === R && Kl(l) === t.type) ? ((r = a(t, n.props)).ref = Hl(e, t, n), r.return = e, r) : ((r = Ms(n.type, n.key, n.props, null, e.mode, r)).ref = Hl(e, t, n), r.return = e, r) } function c(e, t, n, r) { return null === t || 4 !== t.tag || t.stateNode.containerInfo !== n.containerInfo || t.stateNode.implementation !== n.implementation ? ((t = Ds(n, e.mode, r)).return = e, t) : ((t = a(t, n.children || [])).return = e, t) } function f(e, t, n, r, l) { return null === t || 7 !== t.tag ? 
((t = Ls(n, e.mode, r, l)).return = e, t) : ((t = a(t, n)).return = e, t) } function d(e, t, n) { if ("string" === typeof t && "" !== t || "number" === typeof t) return (t = Fs("" + t, e.mode, n)).return = e, t; if ("object" === typeof t && null !== t) { switch (t.$$typeof) { case S: return (n = Ms(t.type, t.key, t.props, null, e.mode, n)).ref = Hl(e, null, t), n.return = e, n; case w: return (t = Ds(t, e.mode, n)).return = e, t; case R: return d(e, (0, t._init)(t._payload), n) }if (te(t) || M(t)) return (t = Ls(t, e.mode, n, null)).return = e, t; Ql(e, t) } return null } function p(e, t, n, r) { var a = null !== t ? t.key : null; if ("string" === typeof n && "" !== n || "number" === typeof n) return null !== a ? null : u(e, t, "" + n, r); if ("object" === typeof n && null !== n) { switch (n.$$typeof) { case S: return n.key === a ? s(e, t, n, r) : null; case w: return n.key === a ? c(e, t, n, r) : null; case R: return p(e, t, (a = n._init)(n._payload), r) }if (te(n) || M(n)) return null !== a ? null : f(e, t, n, r, null); Ql(e, n) } return null } function m(e, t, n, r, a) { if ("string" === typeof r && "" !== r || "number" === typeof r) return u(t, e = e.get(n) || null, "" + r, a); if ("object" === typeof r && null !== r) { switch (r.$$typeof) { case S: return s(t, e = e.get(null === r.key ? n : r.key) || null, r, a); case w: return c(t, e = e.get(null === r.key ? n : r.key) || null, r, a); case R: return m(e, t, n, (0, r._init)(r._payload), a) }if (te(r) || M(r)) return f(t, e = e.get(n) || null, r, a, null); Ql(t, r) } return null } function h(a, l, i, u) { for (var s = null, c = null, f = l, h = l = 0, y = null; null !== f && h < i.length; h++) { f.index > h ? (y = f, f = null) : y = f.sibling; var v = p(a, f, i[h], u); if (null === v) { null === f && (f = y); break } e && f && null === v.alternate && t(a, f), l = o(v, l, h), null === c ? 
s = v : c.sibling = v, c = v, f = y } if (h === i.length) return n(a, f), al && Za(a, h), s; if (null === f) { for (; h < i.length; h++)null !== (f = d(a, i[h], u)) && (l = o(f, l, h), null === c ? s = f : c.sibling = f, c = f); return al && Za(a, h), s } for (f = r(a, f); h < i.length; h++)null !== (y = m(f, a, h, i[h], u)) && (e && null !== y.alternate && f.delete(null === y.key ? h : y.key), l = o(y, l, h), null === c ? s = y : c.sibling = y, c = y); return e && f.forEach((function (e) { return t(a, e) })), al && Za(a, h), s } function y(a, i, u, s) { var c = M(u); if ("function" !== typeof c) throw Error(l(150)); if (null == (u = c.call(u))) throw Error(l(151)); for (var f = c = null, h = i, y = i = 0, v = null, b = u.next(); null !== h && !b.done; y++, b = u.next()) { h.index > y ? (v = h, h = null) : v = h.sibling; var g = p(a, h, b.value, s); if (null === g) { null === h && (h = v); break } e && h && null === g.alternate && t(a, h), i = o(g, i, y), null === f ? c = g : f.sibling = g, f = g, h = v } if (b.done) return n(a, h), al && Za(a, y), c; if (null === h) { for (; !b.done; y++, b = u.next())null !== (b = d(a, b.value, s)) && (i = o(b, i, y), null === f ? c = b : f.sibling = b, f = b); return al && Za(a, y), c } for (h = r(a, h); !b.done; y++, b = u.next())null !== (b = m(h, a, y, b.value, s)) && (e && null !== b.alternate && h.delete(null === b.key ? y : b.key), i = o(b, i, y), null === f ? 
c = b : f.sibling = b, f = b); return e && h.forEach((function (e) { return t(a, e) })), al && Za(a, y), c } return function e(r, l, o, u) { if ("object" === typeof o && null !== o && o.type === k && null === o.key && (o = o.props.children), "object" === typeof o && null !== o) { switch (o.$$typeof) { case S: e: { for (var s = o.key, c = l; null !== c;) { if (c.key === s) { if ((s = o.type) === k) { if (7 === c.tag) { n(r, c.sibling), (l = a(c, o.props.children)).return = r, r = l; break e } } else if (c.elementType === s || "object" === typeof s && null !== s && s.$$typeof === R && Kl(s) === c.type) { n(r, c.sibling), (l = a(c, o.props)).ref = Hl(r, c, o), l.return = r, r = l; break e } n(r, c); break } t(r, c), c = c.sibling } o.type === k ? ((l = Ls(o.props.children, r.mode, u, o.key)).return = r, r = l) : ((u = Ms(o.type, o.key, o.props, null, r.mode, u)).ref = Hl(r, l, o), u.return = r, r = u) } return i(r); case w: e: { for (c = o.key; null !== l;) { if (l.key === c) { if (4 === l.tag && l.stateNode.containerInfo === o.containerInfo && l.stateNode.implementation === o.implementation) { n(r, l.sibling), (l = a(l, o.children || [])).return = r, r = l; break e } n(r, l); break } t(r, l), l = l.sibling } (l = Ds(o, r.mode, u)).return = r, r = l } return i(r); case R: return e(r, l, (c = o._init)(o._payload), u) }if (te(o)) return h(r, l, o, u); if (M(o)) return y(r, l, o, u); Ql(r, o) } return "string" === typeof o && "" !== o || "number" === typeof o ? (o = "" + o, null !== l && 6 === l.tag ? (n(r, l.sibling), (l = a(l, o)).return = r, r = l) : (n(r, l), (l = Fs(o, r.mode, u)).return = r, r = l), i(r)) : n(r, l) } } var Yl = Gl(!0), Xl = Gl(!1), Zl = {}, Jl = Ea(Zl), eo = Ea(Zl), to = Ea(Zl); function no(e) { if (e === Zl) throw Error(l(174)); return e } function ro(e, t) { switch (Ca(to, t), Ca(eo, e), Ca(Jl, Zl), e = t.nodeType) { case 9: case 11: t = (t = t.documentElement) ? t.namespaceURI : ue(null, ""); break; default: t = ue(t = (e = 8 === e ? 
t.parentNode : t).namespaceURI || null, e = e.tagName) }xa(Jl), Ca(Jl, t) } function ao() { xa(Jl), xa(eo), xa(to) } function lo(e) { no(to.current); var t = no(Jl.current), n = ue(t, e.type); t !== n && (Ca(eo, e), Ca(Jl, n)) } function oo(e) { eo.current === e && (xa(Jl), xa(eo)) } var io = Ea(0); function uo(e) { for (var t = e; null !== t;) { if (13 === t.tag) { var n = t.memoizedState; if (null !== n && (null === (n = n.dehydrated) || "$?" === n.data || "$!" === n.data)) return t } else if (19 === t.tag && void 0 !== t.memoizedProps.revealOrder) { if (0 !== (128 & t.flags)) return t } else if (null !== t.child) { t.child.return = t, t = t.child; continue } if (t === e) break; for (; null === t.sibling;) { if (null === t.return || t.return === e) return null; t = t.return } t.sibling.return = t.return, t = t.sibling } return null } var so = []; function co() { for (var e = 0; e < so.length; e++)so[e]._workInProgressVersionPrimary = null; so.length = 0 } var fo = _.ReactCurrentDispatcher, po = _.ReactCurrentBatchConfig, mo = 0, ho = null, yo = null, vo = null, bo = !1, go = !1, _o = 0, So = 0; function wo() { throw Error(l(321)) } function ko(e, t) { if (null === t) return !1; for (var n = 0; n < t.length && n < e.length; n++)if (!ir(e[n], t[n])) return !1; return !0 } function Eo(e, t, n, r, a, o) { if (mo = o, ho = t, t.memoizedState = null, t.updateQueue = null, t.lanes = 0, fo.current = null === e || null === e.memoizedState ? ii : ui, e = n(r, a), go) { o = 0; do { if (go = !1, _o = 0, 25 <= o) throw Error(l(301)); o += 1, vo = yo = null, t.updateQueue = null, fo.current = si, e = n(r, a) } while (go) } if (fo.current = oi, t = null !== yo && null !== yo.next, mo = 0, vo = yo = ho = null, bo = !1, t) throw Error(l(300)); return e } function xo() { var e = 0 !== _o; return _o = 0, e } function Co() { var e = { memoizedState: null, baseState: null, baseQueue: null, queue: null, next: null }; return null === vo ? 
ho.memoizedState = vo = e : vo = vo.next = e, vo } function To() { if (null === yo) { var e = ho.alternate; e = null !== e ? e.memoizedState : null } else e = yo.next; var t = null === vo ? ho.memoizedState : vo.next; if (null !== t) vo = t, yo = e; else { if (null === e) throw Error(l(310)); e = { memoizedState: (yo = e).memoizedState, baseState: yo.baseState, baseQueue: yo.baseQueue, queue: yo.queue, next: null }, null === vo ? ho.memoizedState = vo = e : vo = vo.next = e } return vo } function Po(e, t) { return "function" === typeof t ? t(e) : t } function Oo(e) { var t = To(), n = t.queue; if (null === n) throw Error(l(311)); n.lastRenderedReducer = e; var r = yo, a = r.baseQueue, o = n.pending; if (null !== o) { if (null !== a) { var i = a.next; a.next = o.next, o.next = i } r.baseQueue = a = o, n.pending = null } if (null !== a) { o = a.next, r = r.baseState; var u = i = null, s = null, c = o; do { var f = c.lane; if ((mo & f) === f) null !== s && (s = s.next = { lane: 0, action: c.action, hasEagerState: c.hasEagerState, eagerState: c.eagerState, next: null }), r = c.hasEagerState ? c.eagerState : e(r, c.action); else { var d = { lane: f, action: c.action, hasEagerState: c.hasEagerState, eagerState: c.eagerState, next: null }; null === s ? (u = s = d, i = r) : s = s.next = d, ho.lanes |= f, Iu |= f } c = c.next } while (null !== c && c !== o); null === s ? 
i = r : s.next = u, ir(r, t.memoizedState) || (_i = !0), t.memoizedState = r, t.baseState = i, t.baseQueue = s, n.lastRenderedState = r } if (null !== (e = n.interleaved)) { a = e; do { o = a.lane, ho.lanes |= o, Iu |= o, a = a.next } while (a !== e) } else null === a && (n.lanes = 0); return [t.memoizedState, n.dispatch] } function zo(e) { var t = To(), n = t.queue; if (null === n) throw Error(l(311)); n.lastRenderedReducer = e; var r = n.dispatch, a = n.pending, o = t.memoizedState; if (null !== a) { n.pending = null; var i = a = a.next; do { o = e(o, i.action), i = i.next } while (i !== a); ir(o, t.memoizedState) || (_i = !0), t.memoizedState = o, null === t.baseQueue && (t.baseState = o), n.lastRenderedState = o } return [o, r] } function jo() { } function Ro(e, t) { var n = ho, r = To(), a = t(), o = !ir(r.memoizedState, a); if (o && (r.memoizedState = a, _i = !0), r = r.queue, qo(Mo.bind(null, n, r, e), [e]), r.getSnapshot !== t || o || null !== vo && 1 & vo.memoizedState.tag) { if (n.flags |= 2048, Do(9, No.bind(null, n, r, a, t), void 0, null), null === zu) throw Error(l(349)); 0 !== (30 & mo) || Ao(n, t, a) } return a } function Ao(e, t, n) { e.flags |= 16384, e = { getSnapshot: t, value: n }, null === (t = ho.updateQueue) ? (t = { lastEffect: null, stores: null }, ho.updateQueue = t, t.stores = [e]) : null === (n = t.stores) ? 
t.stores = [e] : n.push(e) } function No(e, t, n, r) { t.value = n, t.getSnapshot = r, Lo(t) && Io(e) } function Mo(e, t, n) { return n((function () { Lo(t) && Io(e) })) } function Lo(e) { var t = e.getSnapshot; e = e.value; try { var n = t(); return !ir(e, n) } catch (r) { return !0 } } function Io(e) { var t = Ol(e, 1); null !== t && rs(t, e, 1, -1) } function Fo(e) { var t = Co(); return "function" === typeof e && (e = e()), t.memoizedState = t.baseState = e, e = { pending: null, interleaved: null, lanes: 0, dispatch: null, lastRenderedReducer: Po, lastRenderedState: e }, t.queue = e, e = e.dispatch = ni.bind(null, ho, e), [t.memoizedState, e] } function Do(e, t, n, r) { return e = { tag: e, create: t, destroy: n, deps: r, next: null }, null === (t = ho.updateQueue) ? (t = { lastEffect: null, stores: null }, ho.updateQueue = t, t.lastEffect = e.next = e) : null === (n = t.lastEffect) ? t.lastEffect = e.next = e : (r = n.next, n.next = e, e.next = r, t.lastEffect = e), e } function Uo() { return To().memoizedState } function Vo(e, t, n, r) { var a = Co(); ho.flags |= e, a.memoizedState = Do(1 | t, n, void 0, void 0 === r ? null : r) } function Bo(e, t, n, r) { var a = To(); r = void 0 === r ? null : r; var l = void 0; if (null !== yo) { var o = yo.memoizedState; if (l = o.destroy, null !== r && ko(r, o.deps)) return void (a.memoizedState = Do(t, n, l, r)) } ho.flags |= e, a.memoizedState = Do(1 | t, n, l, r) } function $o(e, t) { return Vo(8390656, 8, e, t) } function qo(e, t) { return Bo(2048, 8, e, t) } function Wo(e, t) { return Bo(4, 2, e, t) } function Ho(e, t) { return Bo(4, 4, e, t) } function Qo(e, t) { return "function" === typeof t ? (e = e(), t(e), function () { t(null) }) : null !== t && void 0 !== t ? (e = e(), t.current = e, function () { t.current = null }) : void 0 } function Ko(e, t, n) { return n = null !== n && void 0 !== n ? 
n.concat([e]) : null, Bo(4, 4, Qo.bind(null, t, e), n) } function Go() { } function Yo(e, t) { var n = To(); t = void 0 === t ? null : t; var r = n.memoizedState; return null !== r && null !== t && ko(t, r[1]) ? r[0] : (n.memoizedState = [e, t], e) } function Xo(e, t) { var n = To(); t = void 0 === t ? null : t; var r = n.memoizedState; return null !== r && null !== t && ko(t, r[1]) ? r[0] : (e = e(), n.memoizedState = [e, t], e) } function Zo(e, t, n) { return 0 === (21 & mo) ? (e.baseState && (e.baseState = !1, _i = !0), e.memoizedState = n) : (ir(n, t) || (n = ht(), ho.lanes |= n, Iu |= n, e.baseState = !0), t) } function Jo(e, t) { var n = gt; gt = 0 !== n && 4 > n ? n : 4, e(!0); var r = po.transition; po.transition = {}; try { e(!1), t() } finally { gt = n, po.transition = r } } function ei() { return To().memoizedState } function ti(e, t, n) { var r = ns(e); if (n = { lane: r, action: n, hasEagerState: !1, eagerState: null, next: null }, ri(e)) ai(t, n); else if (null !== (n = Pl(e, t, n, r))) { rs(n, e, r, ts()), li(n, t, r) } } function ni(e, t, n) { var r = ns(e), a = { lane: r, action: n, hasEagerState: !1, eagerState: null, next: null }; if (ri(e)) ai(t, a); else { var l = e.alternate; if (0 === e.lanes && (null === l || 0 === l.lanes) && null !== (l = t.lastRenderedReducer)) try { var o = t.lastRenderedState, i = l(o, n); if (a.hasEagerState = !0, a.eagerState = i, ir(i, o)) { var u = t.interleaved; return null === u ? (a.next = a, Tl(t)) : (a.next = u.next, u.next = a), void (t.interleaved = a) } } catch (s) { } null !== (n = Pl(e, t, a, r)) && (rs(n, e, r, a = ts()), li(n, t, r)) } } function ri(e) { var t = e.alternate; return e === ho || null !== t && t === ho } function ai(e, t) { go = bo = !0; var n = e.pending; null === n ? 
t.next = t : (t.next = n.next, n.next = t), e.pending = t } function li(e, t, n) { if (0 !== (4194240 & n)) { var r = t.lanes; n |= r &= e.pendingLanes, t.lanes = n, bt(e, n) } } var oi = { readContext: xl, useCallback: wo, useContext: wo, useEffect: wo, useImperativeHandle: wo, useInsertionEffect: wo, useLayoutEffect: wo, useMemo: wo, useReducer: wo, useRef: wo, useState: wo, useDebugValue: wo, useDeferredValue: wo, useTransition: wo, useMutableSource: wo, useSyncExternalStore: wo, useId: wo, unstable_isNewReconciler: !1 }, ii = { readContext: xl, useCallback: function (e, t) { return Co().memoizedState = [e, void 0 === t ? null : t], e }, useContext: xl, useEffect: $o, useImperativeHandle: function (e, t, n) { return n = null !== n && void 0 !== n ? n.concat([e]) : null, Vo(4194308, 4, Qo.bind(null, t, e), n) }, useLayoutEffect: function (e, t) { return Vo(4194308, 4, e, t) }, useInsertionEffect: function (e, t) { return Vo(4, 2, e, t) }, useMemo: function (e, t) { var n = Co(); return t = void 0 === t ? null : t, e = e(), n.memoizedState = [e, t], e }, useReducer: function (e, t, n) { var r = Co(); return t = void 0 !== n ? 
n(t) : t, r.memoizedState = r.baseState = t, e = { pending: null, interleaved: null, lanes: 0, dispatch: null, lastRenderedReducer: e, lastRenderedState: t }, r.queue = e, e = e.dispatch = ti.bind(null, ho, e), [r.memoizedState, e] }, useRef: function (e) { return e = { current: e }, Co().memoizedState = e }, useState: Fo, useDebugValue: Go, useDeferredValue: function (e) { return Co().memoizedState = e }, useTransition: function () { var e = Fo(!1), t = e[0]; return e = Jo.bind(null, e[1]), Co().memoizedState = e, [t, e] }, useMutableSource: function () { }, useSyncExternalStore: function (e, t, n) { var r = ho, a = Co(); if (al) { if (void 0 === n) throw Error(l(407)); n = n() } else { if (n = t(), null === zu) throw Error(l(349)); 0 !== (30 & mo) || Ao(r, t, n) } a.memoizedState = n; var o = { value: n, getSnapshot: t }; return a.queue = o, $o(Mo.bind(null, r, o, e), [e]), r.flags |= 2048, Do(9, No.bind(null, r, o, n, t), void 0, null), n }, useId: function () { var e = Co(), t = zu.identifierPrefix; if (al) { var n = Xa; t = ":" + t + "R" + (n = (Ya & ~(1 << 32 - ot(Ya) - 1)).toString(32) + n), 0 < (n = _o++) && (t += "H" + n.toString(32)), t += ":" } else t = ":" + t + "r" + (n = So++).toString(32) + ":"; return e.memoizedState = t }, unstable_isNewReconciler: !1 }, ui = { readContext: xl, useCallback: Yo, useContext: xl, useEffect: qo, useImperativeHandle: Ko, useInsertionEffect: Wo, useLayoutEffect: Ho, useMemo: Xo, useReducer: Oo, useRef: Uo, useState: function () { return Oo(Po) }, useDebugValue: Go, useDeferredValue: function (e) { return Zo(To(), yo.memoizedState, e) }, useTransition: function () { return [Oo(Po)[0], To().memoizedState] }, useMutableSource: jo, useSyncExternalStore: Ro, useId: ei, unstable_isNewReconciler: !1 }, si = { readContext: xl, useCallback: Yo, useContext: xl, useEffect: qo, useImperativeHandle: Ko, useInsertionEffect: Wo, useLayoutEffect: Ho, useMemo: Xo, useReducer: zo, useRef: Uo, useState: function () { return zo(Po) }, 
useDebugValue: Go, useDeferredValue: function (e) { var t = To(); return null === yo ? t.memoizedState = e : Zo(t, yo.memoizedState, e) }, useTransition: function () { return [zo(Po)[0], To().memoizedState] }, useMutableSource: jo, useSyncExternalStore: Ro, useId: ei, unstable_isNewReconciler: !1 }; function ci(e, t) { try { var n = "", r = t; do { n += V(r), r = r.return } while (r); var a = n } catch (l) { a = "\nError generating stack: " + l.message + "\n" + l.stack } return { value: e, source: t, stack: a, digest: null } } function fi(e, t, n) { return { value: e, source: null, stack: null != n ? n : null, digest: null != t ? t : null } } function di(e, t) { try { console.error(t.value) } catch (n) { setTimeout((function () { throw n })) } } var pi = "function" === typeof WeakMap ? WeakMap : Map; function mi(e, t, n) { (n = Al(-1, n)).tag = 3, n.payload = { element: null }; var r = t.value; return n.callback = function () { Wu || (Wu = !0, Hu = r), di(0, t) }, n } function hi(e, t, n) { (n = Al(-1, n)).tag = 3; var r = e.type.getDerivedStateFromError; if ("function" === typeof r) { var a = t.value; n.payload = function () { return r(a) }, n.callback = function () { di(0, t) } } var l = e.stateNode; return null !== l && "function" === typeof l.componentDidCatch && (n.callback = function () { di(0, t), "function" !== typeof r && (null === Qu ? Qu = new Set([this]) : Qu.add(this)); var e = t.stack; this.componentDidCatch(t.value, { componentStack: null !== e ? e : "" }) }), n } function yi(e, t, n) { var r = e.pingCache; if (null === r) { r = e.pingCache = new pi; var a = new Set; r.set(t, a) } else void 0 === (a = r.get(t)) && (a = new Set, r.set(t, a)); a.has(n) || (a.add(n), e = Cs.bind(null, e, t, n), t.then(e, e)) } function vi(e) { do { var t; if ((t = 13 === e.tag) && (t = null === (t = e.memoizedState) || null !== t.dehydrated), t) return e; e = e.return } while (null !== e); return null } function bi(e, t, n, r, a) { return 0 === (1 & e.mode) ? (e === t ? 
e.flags |= 65536 : (e.flags |= 128, n.flags |= 131072, n.flags &= -52805, 1 === n.tag && (null === n.alternate ? n.tag = 17 : ((t = Al(-1, 1)).tag = 2, Nl(n, t, 1))), n.lanes |= 1), e) : (e.flags |= 65536, e.lanes = a, e) } var gi = _.ReactCurrentOwner, _i = !1; function Si(e, t, n, r) { t.child = null === e ? Xl(t, null, n, r) : Yl(t, e.child, n, r) } function wi(e, t, n, r, a) { n = n.render; var l = t.ref; return El(t, a), r = Eo(e, t, n, r, l, a), n = xo(), null === e || _i ? (al && n && el(t), t.flags |= 1, Si(e, t, r, a), t.child) : (t.updateQueue = e.updateQueue, t.flags &= -2053, e.lanes &= ~a, Wi(e, t, a)) } function ki(e, t, n, r, a) { if (null === e) { var l = n.type; return "function" !== typeof l || As(l) || void 0 !== l.defaultProps || null !== n.compare || void 0 !== n.defaultProps ? ((e = Ms(n.type, null, r, t, t.mode, a)).ref = t.ref, e.return = t, t.child = e) : (t.tag = 15, t.type = l, Ei(e, t, l, r, a)) } if (l = e.child, 0 === (e.lanes & a)) { var o = l.memoizedProps; if ((n = null !== (n = n.compare) ? n : ur)(o, r) && e.ref === t.ref) return Wi(e, t, a) } return t.flags |= 1, (e = Ns(l, r)).ref = t.ref, e.return = t, t.child = e } function Ei(e, t, n, r, a) { if (null !== e) { var l = e.memoizedProps; if (ur(l, r) && e.ref === t.ref) { if (_i = !1, t.pendingProps = r = l, 0 === (e.lanes & a)) return t.lanes = e.lanes, Wi(e, t, a); 0 !== (131072 & e.flags) && (_i = !0) } } return Ti(e, t, n, r, a) } function xi(e, t, n) { var r = t.pendingProps, a = r.children, l = null !== e ? e.memoizedState : null; if ("hidden" === r.mode) if (0 === (1 & t.mode)) t.memoizedState = { baseLanes: 0, cachePool: null, transitions: null }, Ca(Nu, Au), Au |= n; else { if (0 === (1073741824 & n)) return e = null !== l ? 
l.baseLanes | n : n, t.lanes = t.childLanes = 1073741824, t.memoizedState = { baseLanes: e, cachePool: null, transitions: null }, t.updateQueue = null, Ca(Nu, Au), Au |= e, null; t.memoizedState = { baseLanes: 0, cachePool: null, transitions: null }, r = null !== l ? l.baseLanes : n, Ca(Nu, Au), Au |= r } else null !== l ? (r = l.baseLanes | n, t.memoizedState = null) : r = n, Ca(Nu, Au), Au |= r; return Si(e, t, a, n), t.child } function Ci(e, t) { var n = t.ref; (null === e && null !== n || null !== e && e.ref !== n) && (t.flags |= 512, t.flags |= 2097152) } function Ti(e, t, n, r, a) { var l = Ra(n) ? za : Pa.current; return l = ja(t, l), El(t, a), n = Eo(e, t, n, r, l, a), r = xo(), null === e || _i ? (al && r && el(t), t.flags |= 1, Si(e, t, n, a), t.child) : (t.updateQueue = e.updateQueue, t.flags &= -2053, e.lanes &= ~a, Wi(e, t, a)) } function Pi(e, t, n, r, a) { if (Ra(n)) { var l = !0; La(t) } else l = !1; if (El(t, a), null === t.stateNode) qi(e, t), $l(t, n, r), Wl(t, n, r, a), r = !0; else if (null === e) { var o = t.stateNode, i = t.memoizedProps; o.props = i; var u = o.context, s = n.contextType; "object" === typeof s && null !== s ? s = xl(s) : s = ja(t, s = Ra(n) ? za : Pa.current); var c = n.getDerivedStateFromProps, f = "function" === typeof c || "function" === typeof o.getSnapshotBeforeUpdate; f || "function" !== typeof o.UNSAFE_componentWillReceiveProps && "function" !== typeof o.componentWillReceiveProps || (i !== r || u !== s) && ql(t, o, r, s), zl = !1; var d = t.memoizedState; o.state = d, Il(t, r, o, a), u = t.memoizedState, i !== r || d !== u || Oa.current || zl ? ("function" === typeof c && (Ul(t, n, c, r), u = t.memoizedState), (i = zl || Bl(t, n, i, r, d, u, s)) ? 
(f || "function" !== typeof o.UNSAFE_componentWillMount && "function" !== typeof o.componentWillMount || ("function" === typeof o.componentWillMount && o.componentWillMount(), "function" === typeof o.UNSAFE_componentWillMount && o.UNSAFE_componentWillMount()), "function" === typeof o.componentDidMount && (t.flags |= 4194308)) : ("function" === typeof o.componentDidMount && (t.flags |= 4194308), t.memoizedProps = r, t.memoizedState = u), o.props = r, o.state = u, o.context = s, r = i) : ("function" === typeof o.componentDidMount && (t.flags |= 4194308), r = !1) } else { o = t.stateNode, Rl(e, t), i = t.memoizedProps, s = t.type === t.elementType ? i : yl(t.type, i), o.props = s, f = t.pendingProps, d = o.context, "object" === typeof (u = n.contextType) && null !== u ? u = xl(u) : u = ja(t, u = Ra(n) ? za : Pa.current); var p = n.getDerivedStateFromProps; (c = "function" === typeof p || "function" === typeof o.getSnapshotBeforeUpdate) || "function" !== typeof o.UNSAFE_componentWillReceiveProps && "function" !== typeof o.componentWillReceiveProps || (i !== f || d !== u) && ql(t, o, r, u), zl = !1, d = t.memoizedState, o.state = d, Il(t, r, o, a); var m = t.memoizedState; i !== f || d !== m || Oa.current || zl ? ("function" === typeof p && (Ul(t, n, p, r), m = t.memoizedState), (s = zl || Bl(t, n, s, r, d, m, u) || !1) ? 
(c || "function" !== typeof o.UNSAFE_componentWillUpdate && "function" !== typeof o.componentWillUpdate || ("function" === typeof o.componentWillUpdate && o.componentWillUpdate(r, m, u), "function" === typeof o.UNSAFE_componentWillUpdate && o.UNSAFE_componentWillUpdate(r, m, u)), "function" === typeof o.componentDidUpdate && (t.flags |= 4), "function" === typeof o.getSnapshotBeforeUpdate && (t.flags |= 1024)) : ("function" !== typeof o.componentDidUpdate || i === e.memoizedProps && d === e.memoizedState || (t.flags |= 4), "function" !== typeof o.getSnapshotBeforeUpdate || i === e.memoizedProps && d === e.memoizedState || (t.flags |= 1024), t.memoizedProps = r, t.memoizedState = m), o.props = r, o.state = m, o.context = u, r = s) : ("function" !== typeof o.componentDidUpdate || i === e.memoizedProps && d === e.memoizedState || (t.flags |= 4), "function" !== typeof o.getSnapshotBeforeUpdate || i === e.memoizedProps && d === e.memoizedState || (t.flags |= 1024), r = !1) } return Oi(e, t, n, r, l, a) } function Oi(e, t, n, r, a, l) { Ci(e, t); var o = 0 !== (128 & t.flags); if (!r && !o) return a && Ia(t, n, !1), Wi(e, t, l); r = t.stateNode, gi.current = t; var i = o && "function" !== typeof n.getDerivedStateFromError ? null : r.render(); return t.flags |= 1, null !== e && o ? (t.child = Yl(t, e.child, null, l), t.child = Yl(t, null, i, l)) : Si(e, t, i, l), t.memoizedState = r.state, a && Ia(t, n, !0), t.child } function zi(e) { var t = e.stateNode; t.pendingContext ? 
Na(0, t.pendingContext, t.pendingContext !== t.context) : t.context && Na(0, t.context, !1), ro(e, t.containerInfo) } function ji(e, t, n, r, a) { return pl(), ml(a), t.flags |= 256, Si(e, t, n, r), t.child } var Ri, Ai, Ni, Mi, Li = { dehydrated: null, treeContext: null, retryLane: 0 }; function Ii(e) { return { baseLanes: e, cachePool: null, transitions: null } } function Fi(e, t, n) { var r, a = t.pendingProps, o = io.current, i = !1, u = 0 !== (128 & t.flags); if ((r = u) || (r = (null === e || null !== e.memoizedState) && 0 !== (2 & o)), r ? (i = !0, t.flags &= -129) : null !== e && null === e.memoizedState || (o |= 1), Ca(io, 1 & o), null === e) return sl(t), null !== (e = t.memoizedState) && null !== (e = e.dehydrated) ? (0 === (1 & t.mode) ? t.lanes = 1 : "$!" === e.data ? t.lanes = 8 : t.lanes = 1073741824, null) : (u = a.children, e = a.fallback, i ? (a = t.mode, i = t.child, u = { mode: "hidden", children: u }, 0 === (1 & a) && null !== i ? (i.childLanes = 0, i.pendingProps = u) : i = Is(u, a, 0, null), e = Ls(e, a, n, null), i.return = t, e.return = t, i.sibling = e, t.child = i, t.child.memoizedState = Ii(n), t.memoizedState = Li, e) : Di(t, u)); if (null !== (o = e.memoizedState) && null !== (r = o.dehydrated)) return function (e, t, n, r, a, o, i) { if (n) return 256 & t.flags ? (t.flags &= -257, Ui(e, t, i, r = fi(Error(l(422))))) : null !== t.memoizedState ? (t.child = e.child, t.flags |= 128, null) : (o = r.fallback, a = t.mode, r = Is({ mode: "visible", children: r.children }, a, 0, null), (o = Ls(o, a, i, null)).flags |= 2, r.return = t, o.return = t, r.sibling = o, t.child = r, 0 !== (1 & t.mode) && Yl(t, e.child, null, i), t.child.memoizedState = Ii(i), t.memoizedState = Li, o); if (0 === (1 & t.mode)) return Ui(e, t, i, null); if ("$!" 
=== a.data) { if (r = a.nextSibling && a.nextSibling.dataset) var u = r.dgst; return r = u, Ui(e, t, i, r = fi(o = Error(l(419)), r, void 0)) } if (u = 0 !== (i & e.childLanes), _i || u) { if (null !== (r = zu)) { switch (i & -i) { case 4: a = 2; break; case 16: a = 8; break; case 64: case 128: case 256: case 512: case 1024: case 2048: case 4096: case 8192: case 16384: case 32768: case 65536: case 131072: case 262144: case 524288: case 1048576: case 2097152: case 4194304: case 8388608: case 16777216: case 33554432: case 67108864: a = 32; break; case 536870912: a = 268435456; break; default: a = 0 }0 !== (a = 0 !== (a & (r.suspendedLanes | i)) ? 0 : a) && a !== o.retryLane && (o.retryLane = a, Ol(e, a), rs(r, e, a, -1)) } return ys(), Ui(e, t, i, r = fi(Error(l(421)))) } return "$?" === a.data ? (t.flags |= 128, t.child = e.child, t = Ps.bind(null, e), a._reactRetry = t, null) : (e = o.treeContext, rl = sa(a.nextSibling), nl = t, al = !0, ll = null, null !== e && (Qa[Ka++] = Ya, Qa[Ka++] = Xa, Qa[Ka++] = Ga, Ya = e.id, Xa = e.overflow, Ga = t), t = Di(t, r.children), t.flags |= 4096, t) }(e, t, u, a, r, o, n); if (i) { i = a.fallback, u = t.mode, r = (o = e.child).sibling; var s = { mode: "hidden", children: a.children }; return 0 === (1 & u) && t.child !== o ? ((a = t.child).childLanes = 0, a.pendingProps = s, t.deletions = null) : (a = Ns(o, s)).subtreeFlags = 14680064 & o.subtreeFlags, null !== r ? i = Ns(r, i) : (i = Ls(i, u, n, null)).flags |= 2, i.return = t, a.return = t, a.sibling = i, t.child = a, a = i, i = t.child, u = null === (u = e.child.memoizedState) ? Ii(n) : { baseLanes: u.baseLanes | n, cachePool: null, transitions: u.transitions }, i.memoizedState = u, i.childLanes = e.childLanes & ~n, t.memoizedState = Li, a } return e = (i = e.child).sibling, a = Ns(i, { mode: "visible", children: a.children }), 0 === (1 & t.mode) && (a.lanes = n), a.return = t, a.sibling = null, null !== e && (null === (n = t.deletions) ? 
(t.deletions = [e], t.flags |= 16) : n.push(e)), t.child = a, t.memoizedState = null, a } function Di(e, t) { return (t = Is({ mode: "visible", children: t }, e.mode, 0, null)).return = e, e.child = t } function Ui(e, t, n, r) { return null !== r && ml(r), Yl(t, e.child, null, n), (e = Di(t, t.pendingProps.children)).flags |= 2, t.memoizedState = null, e } function Vi(e, t, n) { e.lanes |= t; var r = e.alternate; null !== r && (r.lanes |= t), kl(e.return, t, n) } function Bi(e, t, n, r, a) { var l = e.memoizedState; null === l ? e.memoizedState = { isBackwards: t, rendering: null, renderingStartTime: 0, last: r, tail: n, tailMode: a } : (l.isBackwards = t, l.rendering = null, l.renderingStartTime = 0, l.last = r, l.tail = n, l.tailMode = a) } function $i(e, t, n) { var r = t.pendingProps, a = r.revealOrder, l = r.tail; if (Si(e, t, r.children, n), 0 !== (2 & (r = io.current))) r = 1 & r | 2, t.flags |= 128; else { if (null !== e && 0 !== (128 & e.flags)) e: for (e = t.child; null !== e;) { if (13 === e.tag) null !== e.memoizedState && Vi(e, n, t); else if (19 === e.tag) Vi(e, n, t); else if (null !== e.child) { e.child.return = e, e = e.child; continue } if (e === t) break e; for (; null === e.sibling;) { if (null === e.return || e.return === t) break e; e = e.return } e.sibling.return = e.return, e = e.sibling } r &= 1 } if (Ca(io, r), 0 === (1 & t.mode)) t.memoizedState = null; else switch (a) { case "forwards": for (n = t.child, a = null; null !== n;)null !== (e = n.alternate) && null === uo(e) && (a = n), n = n.sibling; null === (n = a) ? 
(a = t.child, t.child = null) : (a = n.sibling, n.sibling = null), Bi(t, !1, a, n, l); break; case "backwards": for (n = null, a = t.child, t.child = null; null !== a;) { if (null !== (e = a.alternate) && null === uo(e)) { t.child = a; break } e = a.sibling, a.sibling = n, n = a, a = e } Bi(t, !0, n, null, l); break; case "together": Bi(t, !1, null, null, void 0); break; default: t.memoizedState = null }return t.child } function qi(e, t) { 0 === (1 & t.mode) && null !== e && (e.alternate = null, t.alternate = null, t.flags |= 2) } function Wi(e, t, n) { if (null !== e && (t.dependencies = e.dependencies), Iu |= t.lanes, 0 === (n & t.childLanes)) return null; if (null !== e && t.child !== e.child) throw Error(l(153)); if (null !== t.child) { for (n = Ns(e = t.child, e.pendingProps), t.child = n, n.return = t; null !== e.sibling;)e = e.sibling, (n = n.sibling = Ns(e, e.pendingProps)).return = t; n.sibling = null } return t.child } function Hi(e, t) { if (!al) switch (e.tailMode) { case "hidden": t = e.tail; for (var n = null; null !== t;)null !== t.alternate && (n = t), t = t.sibling; null === n ? e.tail = null : n.sibling = null; break; case "collapsed": n = e.tail; for (var r = null; null !== n;)null !== n.alternate && (r = n), n = n.sibling; null === r ? t || null === e.tail ? 
e.tail = null : e.tail.sibling = null : r.sibling = null } } function Qi(e) { var t = null !== e.alternate && e.alternate.child === e.child, n = 0, r = 0; if (t) for (var a = e.child; null !== a;)n |= a.lanes | a.childLanes, r |= 14680064 & a.subtreeFlags, r |= 14680064 & a.flags, a.return = e, a = a.sibling; else for (a = e.child; null !== a;)n |= a.lanes | a.childLanes, r |= a.subtreeFlags, r |= a.flags, a.return = e, a = a.sibling; return e.subtreeFlags |= r, e.childLanes = n, t } function Ki(e, t, n) { var r = t.pendingProps; switch (tl(t), t.tag) { case 2: case 16: case 15: case 0: case 11: case 7: case 8: case 12: case 9: case 14: return Qi(t), null; case 1: case 17: return Ra(t.type) && Aa(), Qi(t), null; case 3: return r = t.stateNode, ao(), xa(Oa), xa(Pa), co(), r.pendingContext && (r.context = r.pendingContext, r.pendingContext = null), null !== e && null !== e.child || (fl(t) ? t.flags |= 4 : null === e || e.memoizedState.isDehydrated && 0 === (256 & t.flags) || (t.flags |= 1024, null !== ll && (is(ll), ll = null))), Ai(e, t), Qi(t), null; case 5: oo(t); var a = no(to.current); if (n = t.type, null !== e && null != t.stateNode) Ni(e, t, n, r, a), e.ref !== t.ref && (t.flags |= 512, t.flags |= 2097152); else { if (!r) { if (null === t.stateNode) throw Error(l(166)); return Qi(t), null } if (e = no(Jl.current), fl(t)) { r = t.stateNode, n = t.type; var o = t.memoizedProps; switch (r[da] = t, r[pa] = o, e = 0 !== (1 & t.mode), n) { case "dialog": Dr("cancel", r), Dr("close", r); break; case "iframe": case "object": case "embed": Dr("load", r); break; case "video": case "audio": for (a = 0; a < Mr.length; a++)Dr(Mr[a], r); break; case "source": Dr("error", r); break; case "img": case "image": case "link": Dr("error", r), Dr("load", r); break; case "details": Dr("toggle", r); break; case "input": Y(r, o), Dr("invalid", r); break; case "select": r._wrapperState = { wasMultiple: !!o.multiple }, Dr("invalid", r); break; case "textarea": ae(r, o), Dr("invalid", 
r) }for (var u in be(n, o), a = null, o) if (o.hasOwnProperty(u)) { var s = o[u]; "children" === u ? "string" === typeof s ? r.textContent !== s && (!0 !== o.suppressHydrationWarning && Zr(r.textContent, s, e), a = ["children", s]) : "number" === typeof s && r.textContent !== "" + s && (!0 !== o.suppressHydrationWarning && Zr(r.textContent, s, e), a = ["children", "" + s]) : i.hasOwnProperty(u) && null != s && "onScroll" === u && Dr("scroll", r) } switch (n) { case "input": H(r), J(r, o, !0); break; case "textarea": H(r), oe(r); break; case "select": case "option": break; default: "function" === typeof o.onClick && (r.onclick = Jr) }r = a, t.updateQueue = r, null !== r && (t.flags |= 4) } else { u = 9 === a.nodeType ? a : a.ownerDocument, "http://www.w3.org/1999/xhtml" === e && (e = ie(n)), "http://www.w3.org/1999/xhtml" === e ? "script" === n ? ((e = u.createElement("div")).innerHTML = " + + + + +{{ end }} +``` + +* Replace the content of `swagger.html` with the following: + +``` + {{ define "swaggerPage" }} + {{ template "header" .}} + + {{ template "navigation" . }} +
+
+ +
+
+ {{ template "footer" .}} + + + +{{ end }} +``` + +* Restart your dashboard service + +* Browse your portal documentation + +Tyk Portal Catalogue API Documentation with ReDoc diff --git a/tyk-developer-portal/tyk-portal-classic/customise/customising-using-dashboard.mdx b/tyk-developer-portal/tyk-portal-classic/customise/customising-using-dashboard.mdx new file mode 100644 index 000000000..8f60fb1c0 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/customise/customising-using-dashboard.mdx @@ -0,0 +1,123 @@ +--- +title: "Customize Pages with CSS and JavaScript" +order: 3 +noindex: True +sidebarTitle: "Customise Pages with CSS and JavaScript" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +The main customization that can be done with the Tyk Dashboard is via the CSS Editor. + +JS customization is also available in a programmatic way. + +#### Step 1: Open CSS Editor + +Click **CSS** from the **Portal Management** menu. + +Portal management menu + +#### Step 2: Make CSS Amendments + +In the CSS Editor, add the classes that you would like to override in the home page. For Tyk Cloud and Multi-Cloud users, this will already be filled in with some initial overrides for you: + +Portal CSS editor + +#### Step 3: Make Email CSS Amendments + +Email CSS editor + +If you wish to customize how emails are displayed to end-users, then you can also add new classes to the Email CSS editor, these classes will be added in-line to the email that is sent out. + +Once you have finished making your changes, click **Update** and the new CSS will be available on your site. + +### Updating CSS via API +Alternatively, as always, you can perform the above actions with an API call instead of through the Dashboard UI. + +First, we'll need to get the block ID of the CSS component in order to update it. This is stored in Mongo by the Dashboard. +To get the block ID, we have to make a REST call to the Dashboard API. 
+ +To do so, run this `curl` command: + +```{.copyWrapper} +curl www.tyk-test.com:3000/api/portal/css \ +-H "Authorization:{DASHBOARD_API_KEY}" +``` +Response: +```{.copyWrapper} +{ + "email_css": "", + "id": "{CSS_BLOCK_ID}, + "org_id": "{ORG_ID}", + "page_css": ".btn-success {background-color: magenta1}" +} +``` +Now we can use the `id` and the `org_id` to update the CSS. +The below `curl` command will update the CSS for a specific organization. + +```{.copyWrapper} +curl -X PUT http://tyk-dashboard.com/api/portal/css \ + -H "authorization:{DASHBOARD_API_KEY}" \ + -d '{ + "email_css": "", + "id": "{CSS_BLOCK_ID}, + "org_id": "{ORG_ID}", + "page_css": ".btn-success {background-color: magenta}" + }' +``` + + [1]: /img/dashboard/portal-management/portal_man_css.png + [2]: /img/dashboard/portal-management/portal_site_css.png + + ### Updating JavaScript via API + + In order to initialize the portal JS object in the database use the following request where `console.log(1)` should be replaced by your JS snippet: + + ```{.copyWrapper} +curl -X POST www.tyk-test.com:3000/api/portal/js \ +-H "Authorization:{DASHBOARD_API_KEY}" \ +-d '{"page_js": "console.log(1)"}' +``` + +Request: +```{.copyWrapper} +{ + "page_js": "console.log(1)" +} +``` + +Response: +```{.copyWrapper} +{ + "Status": "OK", + "Message": "609b71df21c9371dd5906ec1", + "Meta": null +} +``` + +The endpoint will return the ID of the portal JS object, this can be used to update it. + + ```{.copyWrapper} +curl www.tyk-test.com:3000/api/portal/js \ +-H "Authorization:{DASHBOARD_API_KEY}" \ +--data '{"page_js": "console.log(2)", "id": "609b71df21c9371dd5906ec1"}' +``` + +Request: +```{.copyWrapper} +{ + "page_js": "console.log(2)", + "id": "609b71df21c9371dd5906ec1" +} +``` + +Response: +```{.copyWrapper} +{ + "page_js": "console.log(1)" +} +``` + +The JavaScript snippet that's added through this endpoint is injected at the bottom of the portal page using a ` + + + + +

Type something in the input field to search the table for first names, last names or emails:

+ +

+ +
+ +``` + + +And save. + +Now visit the portal at "http://dashboard-host:3000/portal/custom" + +custom_page_display + +You now have a searchable Input box that will dynamically filter the results of the table. diff --git a/tyk-developer-portal/tyk-portal-classic/customise/developer-meta-data.mdx b/tyk-developer-portal/tyk-portal-classic/customise/developer-meta-data.mdx new file mode 100644 index 000000000..94c516861 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/customise/developer-meta-data.mdx @@ -0,0 +1,20 @@ +--- +title: "Customize the Developer Signup Form" +order: 4 +noindex: True +sidebarTitle: "Customise the Developer Signup Form" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +When a developer signs up to your developer Portal, you might wish to capture more information about the developer than is supplied by the default form. To enable new fields in this form (they are automatically added to the form as you add them), go to the **Portal Management > Settings** screen, and edit the **Sign up form customization** section: + +Tyk developer portal sign up form customization + +### Developer metadata and keys + +All developer metadata is automatically added to the key metadata when a token is generated, this can be useful if you need to add more information to your upstream requests. + +A developer username will also automatically be made the alias for an API token so that it is easy to identify in the analytics. 
diff --git a/tyk-developer-portal/tyk-portal-classic/developer-profiles.mdx b/tyk-developer-portal/tyk-portal-classic/developer-profiles.mdx new file mode 100644 index 000000000..66526c3ab --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/developer-profiles.mdx @@ -0,0 +1,160 @@ +--- +title: "Developer Profiles" +order: 2 +noindex: True +sidebarTitle: "Developer Profiles" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +Users that are signed up to your portal are called "Developers", these users have access to a Dashboard page which show them their API usage over the past 7 days as well as the policy and quota limits on their relevant keys. + +Developers can sign up to multiple APIs using the API catalog. + +Developer accounts belong to an organization ID, so accounts cannot be shared across organizations in a Tyk Dashboard setup. + +### Navigate to the Portal Developers Section + +Developer Menu + +#### Select Add Developer + +Developer Profile add + +### Add Basic Details + +Developer Profile Create Details + +### Developer Profile Overview + +The first panel in a developer profile will show you an avatar (if they have a Gravatar-enabled email address), as well as the basic fields of their signup: + +Developer profile detail + +### Developer Usage + +The next panel will show you their apI usage as an aggregate for all the tokens that they have generated with their developer access: + +Developer usage graph + +### Developer Keys + +In this panel, you will be able to see the various Keys the developer has access to, and the policies that are connected to the individual Key. + + + +From version 1.9, you can now apply multiple policies to an individual Key. + + + +To drill down into the specific usage patterns for each Key, click **ANALYTICS** for the Key. + +Developer Keys + +### Add a New Key + +To subscribe a developer to a new Key, from the Edit Developer screen, click **New Key**. 
From the pop-up screen, select one or more policies from the drop-down list and click **Request Key**. + + New Key Request + +### Changing Developer Policy Keys + +#### Step 1: View the Developer Profile + +Browse to the developers list view and select the developer that you wish to manage. + +Developer profile detail + +#### Step 2: View Keys List + +This sections shows you the Keys and the policies connected to them. This view will always try to match the access level to a catalog entry, if the policy assigned to a developer is not in the catalog, the entry will read "(No Catalog Entry)". We recommend that all policy levels are in your catalog, even if they are not all live. + +#### Step 3: Click Options + +From the Options drop-down for the Key, select **Change Policy**. + +Keys Sections + +#### Step 4: Select the New Policy + +Select a new policy to add to your Key from the **Policies** drop-down list. You can also remove existing policies connected to the Key. + +Change policy drop down list + +#### Step 5: Save the Change + +Click **CHANGE KEY POLICY** to save the changes. + +### Developer OAuth Clients + + +### Edit the Developer Profile + +All fields in the profile are editable. In this section you can select a field and modify that data for the developer. This will not affect any tokens they may have, but it will affect how it appears in their Developer Dashboard in your Portal. + +Developer edit form + +Developers can edit this data themselves in their accounts section. + +### Search for a Developer + +You can search for a developer (by email address) by entering their address in the Search field. + +This option is only available from Dashboard v1.3.1.2 and onwards. + +Developer Profile Search + +### Developer Edit Profile + +Once logged in, a developer can edit their profile. Select **Edit profile** from the **Account** menu drop-down list. 
+ +Manage Profile + +A developer can change the following: +* Email +* Change Password +* Name +* Telephone +* Country Location + +### Reset Developer Password + +If a developer has forgotten their password, they can request a password reset email from the Login screen. + +Login Screen + +1. Click **Request password reset** +2. Enter your email address and click **Send Password reset email** + +Email Reset + +You will be sent an email with a link to reset your Developer password. Enter your new password and click **Update**. You can then login with your new details. + + + +Your password must be a minimum of 6 characters. + + + +Confirm password + + + + + + [1]: /img/dashboard/portal-management/developer_menu_2.5.png + [2]: /img/dashboard/portal-management/add_developer_2.5.png + [3]: /img/dashboard/portal-management/developer_details_2.5.png + [4]: /img/dashboard/portal-management/developer_overview_2.5.png + [5]: /img/dashboard/portal-management/developer_usage_2.5.png + [6]: /img/dashboard/portal-management/developer_subs_2.5.png + [7]: /img/dashboard/portal-management/developer_edit_2.5.png + [8]: /img/dashboard/portal-management/developer_search_2.5.png + [13]: /img/dashboard/portal-management/developer_edit_2.5.png + [14]: /img/dashboard/portal-management/keys_dev_profile.png + [15]: /img/dashboard/portal-management/change_key_policy.png + [16]: /img/dashboard/portal-management/new_key_request.png + + diff --git a/tyk-developer-portal/tyk-portal-classic/dynamic-client-registration.mdx b/tyk-developer-portal/tyk-portal-classic/dynamic-client-registration.mdx new file mode 100644 index 000000000..d2d96f98a --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/dynamic-client-registration.mdx @@ -0,0 +1,43 @@ +--- +title: "Classic Portal - Dynamic Client Registration" +order: 3 +noindex: True +sidebarTitle: "Overview" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +## OAuth 2.0 Dynamic Client Registration Protocol 
(DCR) + +Available from version 3.2.0 onwards. + +## What is Dynamic Client Registration? + +DCR is a protocol of the Internet Engineering Task Force put in place to set standards in the dynamic registration of clients with authorization servers. +We will go into the specifics of how it works in the context of Tyk, but if you are interested in reading the full RFC, go to: https://tools.ietf.org/html/rfc7591 + +## Why should I use it? + +DCR is a way for you to integrate your developer portal with an external identity provider such as Keycloak, Gluu, Auth0, Okta etc... +The portal developer won't notice a difference. However, when they create the app via Tyk Developer portal, Tyk will dynamically register that client on your authorization server. This means that it is the Authorization Server who will issue the Client ID and Client Secret for the app. +Some of our users leverage external Identity Providers because they provide a variety of features to support organizations in managing identity in one place across all their stack. + +This feature is optional and you can still have a great level of security only using Tyk as your authorization server. + +## Enabling Dynamic Client Registration + +We provide guides for the following identity providers: + +- [Gluu](/tyk-developer-portal/tyk-portal-classic/gluu-dcr). Official docs are available [here](https://gluu.org/docs/gluu-server/4.0/admin-guide/openid-connect/#dynamic-client-registration). +- [Curity](/tyk-developer-portal/tyk-portal-classic/curity-dcr). Official docs are available [here](https://curity.io/docs/idsvr/latest/token-service-admin-guide/dcr.html). +- [Keycloak](/tyk-developer-portal/tyk-portal-classic/keycloak-dcr). Official docs are available [here](https://github.com/keycloak/keycloak-documentation/blob/master/securing_apps/topics/client-registration.adoc). +- [OKTA](/tyk-developer-portal/tyk-portal-classic/okta-dcr). 
Official docs are available [here](https://developer.okta.com/docs/reference/api/oauth-clients/). + + +In case your provider isn't on the list, use the "Other" provider option in the DCR settings. This mode would keep the interaction with your IDP as standard as possible. Note that not all IDPs fully implement the standard. + +## Troubleshooting + +The DCR functionality abstracts most of the errors to the end user (in this case, the developer). In order to diagnose issues between Tyk and your IDP, please refer to the Tyk Dashboard logs. diff --git a/tyk-developer-portal/tyk-portal-classic/gluu-dcr.mdx b/tyk-developer-portal/tyk-portal-classic/gluu-dcr.mdx new file mode 100644 index 000000000..65907c45a --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/gluu-dcr.mdx @@ -0,0 +1,147 @@ +--- +title: "Step by step guide using Gluu" +order: 3 +noindex: True +sidebarTitle: "Step by step guide using Gluu" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + 
+- A [Tyk Self Managed installation](/tyk-self-managed/install) (Gateway + Dashboard). + +### Getting started with Gluu + +In order to get started with Dynamic Client Registration you’ll need to get the OpenID Connect registration endpoint. Open your Gluu dashboard and select the "Configuration" section. Select "JSON Configuration" and toggle the "OxAuth Configuration" tab. + +Step 1 + +In this view you will find the registration endpoint: + +Step 2 + +Another endpoint that will be relevant for your setup is the Well-Known configuration endpoint. Keep both URLs handy as you’ll use them for our next steps. This endpoint typically looks as follows: https://gluu-server/.well-known/openid-configuration + +Because of known issues with Tyk’s JWT driver, you’ll set specific algorithms for the JWKS endpoint. In the same "OxAuth Configuration" tab, scroll down to "jwksAlgorithmsSupported" and select the following options: + +Step 3 + +Click "Save OxAuth Configuration" afterwards. + +For more information on this particular issue please check [this thread](https://support.gluu.org/authentication/8780/wrong-size-of-ec-x-value-in-jwks_uri-while-using-openid/) in the Gluu forum. + +### Setting up Tyk + +Now you're ready to set up Tyk. For compatibility reasons, check your `tyk_analytics.conf` and make sure that a proper `oauth_redirect_uri_separator` parameter is set. You can use the following value: + +```json + "oauth_redirect_uri_separator": ";", +``` + +Remember to restart the service after applying the above change. + +Now open the Tyk Dashboard and click **APIs** under **System Management**. Create a new API called "Gluu API": + +Step 4 + +After the first part of the API creation form was filled, click on "Configure API" and set the authentication settings as follows: + +Step 5 + + + +Where do I get the proper JWKS URI for my Gluu environment? + +The JWKS URI is a required field in the `.well-known/openid-configuration` endpoint of your OpenID Connect Provider metadata. 
Typically found as `"jwks_uri"`. Please see the spec https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse for further information. + + + +For the **Identity Source** field use `"client_id"` and for **Policy Field Name** use `"pol"`. + +Click "Save" and switch to the "Policies" button under "System Management". Once in this section, click on "Create a Policy" and call it "Gluu Policy". Use the default values for this one. Remember to select the previously created "Gluu API" in the access rights section. You will also need to set an expiration setting for the keys. + +After the policy is ready, switch back to the API settings and make sure that the API is using the appropriate policy: + +Step 6 + +Now you’re ready to add this API to the developer portal. Switch to the "Catalog" section under "Portal Management" on the navigation menu. Click on "Add New API", set a name for it and select the newly created policy. For this example use "Gluu Policy": + +Step 7 + +Hit "Save" and click on the recently created item again, switch to the "Settings" tab that’s next to "API Details". In "API Details" toggle the "Override global settings" option. + + + +Tyk lets you set global portal settings that apply to **all portal-listed APIs**, in this guide we assume you’re enabling and setting up DCR for a single API. In case you want to enable DCR for all the APIs, you should go to the **Settings** section under **Portal Management**, and in the **API Access** tab you can enter your DCR settings there. + + + +Once the "Override global settings" option is toggled, scroll down to the DCR section in the bottom and enter the following settings: + +Step 8 + +**Providers:** Different providers might implement the standard in slightly different ways. Tyk provides a specific driver for each one. For IDPs that aren’t on the list use the "Other" option. For this guide, pick "Gluu". 
+ +**Grant Types:** The [OAuth 2.0 grant types](/api-management/authentication/oauth-2) that will be used by the client, see the [specification](https://openid.net/specs/openid-connect-registration-1_0.html#rfc.section.2) for more details. Set "Client Credentials". + +**Token Endpoint Auth Method:** defines the way the client will authenticate against the token endpoint. Use "Client Secret - Post". + +**Response Types:** OAuth 2.0 response types that will be used by the client. Set **Token**. + +**Identity Provider Host:** Base IDP URL, e.g. `https://gluu-server/` + +**Client Registration Endpoint:** OpenID Connect client registration endpoint. The value we use is `https://gluu-server/oxauth/restv1/register` + +This value is found in your well-known discovery document as `registration_endpoint`. The well-known location URL is typically `https://gluu-server/.well-known/openid-configuration` (replace "gluu-server" with your hostname). + +**Initial Registration Access Token:** the token that’s used to register new clients, this was generated in the early steps of the guide. + +### Testing the flow + +Now that both Tyk and Gluu are ready you can try the complete flow. Click "Developers" under "Portal Management", then click "Add developer" and enter some basic information here to create a developer user. + +After the developer is created, open the portal, click on the "OAuth Clients" navigation bar button and follow the wizard: + +Step 9 + +After clicking "Create first OAuth Client" you’ll see your previously created "Gluu API". Select it and click "Save and continue". The following screen will require you to enter a client name. It’s possible to set redirect URLs if you also plan to use this client for other flow types. This setting can be left blank for the purposes of this example. 
+ +Step 10 + +Once you click "Create", Tyk will trigger a registration on your IDP and the details of your client will show up: + +Step 11 + +If you check the Gluu dashboard you will see a new client (named "GluuClient"): + +Step 12 + +The next step is to generate a token and use it for accessing your "Gluu API". You can use Postman for this. You will need the token URL which is also present in the Well-Known URI of your organization. The field is named `"token_endpoint"`. +For this example use the following: https://gluu-server/oxauth/restv1/token + +Your Postman request should contain the following body, where `"client_id"` and `"client_secret"` are the values you got from the developer portal: + +Step 13 + +Note that you aren’t using any additional headers for this request, the client credentials are enough. + +Once you get a response from the IDP, you can copy the `"access_token"` and use it to access your "Gluu API", this request will be proxied by Tyk: + +Step 14 + diff --git a/tyk-developer-portal/tyk-portal-classic/graphql.mdx b/tyk-developer-portal/tyk-portal-classic/graphql.mdx new file mode 100644 index 000000000..76ffae5c2 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/graphql.mdx @@ -0,0 +1,51 @@ +--- +title: "Developer Portal GraphQL" +description: "How to publish GraphQL APIs to your Tyk Developer Portal" +keywords: "GraphQL, Playground, CORS, UDG" +order: 7 +noindex: True +sidebarTitle: "GraphQL with Classic Portal" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +As of Tyk v3.0.0, you can now publish GraphQL APIs, including [Universal Data Graph](/api-management/data-graph#overview) APIs(UDG) to the Tyk Developer Portal. + +When you do that, your API consumers can navigate through a GraphQL Playground, with an IDE complete with Intellisense. 
+ +Portal GraphQL Playground + +## Video Walkthrough + +We have a YouTube walkthrough of how to publish a GraphQL API to your Developer Portal: + + + +## How To Set Up + +Simply create a GraphQL or Universal Data Graph API, create a Policy which protects it, and then publish it to the Developer Portal Catalog. + +In the "Create a Catalog" section, at the bottom, make sure you enable the "Display Playground" + + +Portal GraphQL Playground Setup + +And then, when your API consumers are on the Developer Portal Catalog and click on View Documentation, they will be taken to the GraphQL Playground. + +Portal GraphQL Playground View Docs + + +## Protected GraphQL Catalog + +If you have a protected API, your users won't be able to inspect the GraphQL schema or make API calls until they add their API Key to the Headers section: + +Portal GraphQL Playground Header Injection + +## CORS + +You may have to enable the following CORS settings in the "Advanced Options" of the API Designer to allow your consumers to access the GraphQL Playground: + + +Portal GraphQL Playground CORS diff --git a/tyk-developer-portal/tyk-portal-classic/key-requests.mdx b/tyk-developer-portal/tyk-portal-classic/key-requests.mdx new file mode 100644 index 000000000..b88cedd82 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/key-requests.mdx @@ -0,0 +1,47 @@ +--- +title: "Key Requests" +noindex: True +sidebarTitle: "Key Requests" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +## Key Requests + +A key request is a record that is generated when a developer requests an access token for an API published in the API Catalog. 
The Key request encompasses the following information: + +- The policy of which access is being requested +- The developer doing the requesting +- The catalog entry in question +- The reasoning of why the developer should have access (these are dynamic fields and can be configured) + +When a developer requests access to an API Catalog entry, this key request represents that request for access. The key request can then be acted on, either by the portal itself, or by an administrator. The key request does not grant a token yet, it simply marks the fact that a token has been requested and why. + +Tyk enables you to manage this flow in a few ways: + +- Auto-approve the key request. +- Have an admin approve the key-request. +- Hand off to a third-party system to manage the key-request (e.g. for billing or additional user validation). This is done via WebHooks or via the "Redirect Key Request" Portal Setting. + +## Key Approval +Once a key request is created, one of two things can be done to it: + +- It can be approved: Covered below +- It can be declined: In which case the request is deleted. + +A key request can be created using the Dashboard API too, in fact, the Key Request mechanism is a great way to create a mapping between an identity (a developer) and a token, and managing that process. + +### Secure Key Approval + +By default, the Key Approval flow is straightforward. Once a Key Request is approved, the Developer will be notified via an email which contains the API Key. + +As of Dashboard version `3.1.0`, it is now possible to turn on a more secure key approval flow. Once the "Request Key Approval" setting is enabled, we see an additional setting: +secure_key_approval_setting + +With this feature turned on, we prevent the API key from being sent in plain text via email. 
Instead, once a key request is approved, the Developer will be sent a confirmation link in an email that directs them to the Portal: +secure_key_approval_email + +After clicking the `Generate Key` link and logging into the Portal, the key becomes available to the user: +secure_key_approval_generate \ No newline at end of file diff --git a/tyk-developer-portal/tyk-portal-classic/keycloak-dcr.mdx b/tyk-developer-portal/tyk-portal-classic/keycloak-dcr.mdx new file mode 100644 index 000000000..9349f80e1 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/keycloak-dcr.mdx @@ -0,0 +1,159 @@ +--- +title: "Step by step guide using Keycloak" +order: 1 +noindex: True +sidebarTitle: "Step by step guide using Keycloak" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +We are going to walk you through a basic integration of Tyk with Keycloak using the [OpenID Connect Dynamic Client Registration protocol](https://tools.ietf.org/html/rfc7591). Our current implementation provides support for the client credentials flow with support for JWT. To the developer it works like this: + +1. An API with its corresponding security policy is created in Tyk. It is then added to the Developer Portal Catalog. + +2. A developer signs up and creates a Dynamic Client Registration provider using the Developer Portal. +Tyk sends the Dynamic Client Registration call to your IDP. The IDP replies with the client ID and secret. + +3. Using the previous information, the developer (or your application) triggers a call to the token endpoint of the IDP. +The developer (or your application) then triggers a call to Tyk, using the token that was generated by the IDP. Tyk validates this token using the JWKS provided by the IDP. + +### Requirements + +- A [Keycloak](https://www.keycloak.org/) instance. +- A [Tyk Self Managed installation](/tyk-self-managed/install) (Gateway + Dashboard). 
+ +### Getting started with Keycloak + +To get started with Dynamic Client Registration in Keycloak you'll need to generate an [initial access token](https://openid.net/specs/openid-connect-registration-1_0.html#Terminology) using the Keycloak Administration Console. After logging in, click **Realm settings** under **Configure** and select the **Client Registration** tab: + +Step 1 + +To generate an initial access token, click **Create** and set the expiration time and maximum number of clients to be created using this token: + +Step 2 + +Click **Save** and the token will be created. Keep it safe as you'll use this token to configure Tyk. + +### Setting up Tyk + +Now you're ready to set up Tyk. For compatibility reasons, check your `tyk_analytics.conf` and make sure that a proper `oauth_redirect_uri_separator` parameter is set. You may use the following value: + +```json + "oauth_redirect_uri_separator": ";", +``` + +**Note:** If you're using a self-signed certificate on your Keycloak instance, you will need to set additional flags on both gateway and dashboard. For skipping DCR endpoint SSL verification, add the following flag to `tyk_analytics.conf`: + +```json + "dcr_ssl_insecure_skip_verify": true +``` + +Also add the following flag to `tyk.conf`, this will instruct the gateway to skip SSL verification when the JWT middleware is in use, particularly when JWKS are retrieved from your IDP: + +```json + "jwt_ssl_insecure_skip_verify": true +``` + +Remember to restart the services after applying the above changes. + +Open the Tyk Dashboard and click **APIs** under **System Management**. Create a new API called "Keycloak API": + +Step 3 + +Complete first part of the API creation form, then click **Configure API** and set the Authentication mode as in the image below: + +Step 4 + + + +Where do I get the proper JWKS URI for my Keycloak environment? 
+ +The JWKS URI is a required field in the `.well-known/openid-configuration` endpoint of your OpenID Connect Provider metadata. Please see the [OpenID spec](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse) for further information. + + + + + +For the **Identity Source** field use `"sub"` and for **Policy Field Name** use `"pol"`. + +1. Click **Save** +2. Select **Policies** under **System Management** +3. Click **Create a Policy** and call it **Keycloak Policy**. Use the default values for this policy. +4. In the **Access rights** section, select your previously created **Keycloak API**. You will also need to enter an expiration setting for your keys. + +After the policy is created, switch back to the API settings and make sure that the API is using your **Keycloak API** policy: + +Step 5 + +Now you're ready to add this API to the Developer Portal. +1. Click **Catalog** under **Portal Management** on the navigation menu. +2. Click **Add New API**, enter a name for it and select the newly created policy. Again, you will use **Keycloak Policy**: + +Step 6 + +1. Click **Save** then open the API added again +2. Open the **Settings** tab. +3. In **API Details** select the **Override global settings** option. + + + + + Tyk lets you set global portal settings that apply to **all portal-listed APIs**, in this guide we assume you’re enabling and setting up DCR for a single API. In case you want to enable DCR for all the APIs, you should go to the **Settings** section under **Portal Management**, and in the **API Access** tab you can enter your DCR settings there. + + + +4. Scroll down to the DCR section and enter the following settings: + +Step 7 + +**Providers:** Different providers might implement the standard in slightly different ways, Tyk provides a specific driver for each one. For IDPs that aren’t on the list use the **Other** option. 
+ +**Grant Types:** The [OAuth 2.0 grant types](/api-management/authentication/oauth-2) that will be used by the client, see the [specification](https://openid.net/specs/openid-connect-registration-1_0.html#rfc.section.2) for more details. + +**Token Endpoint Auth Method:** defines the way the client will authenticate against the token endpoint. + +**Response Types:** OAuth 2.0 response types that will be used by the client. + +**Identity Provider Host:** Base IDP URL, e.g. `https://keycloak:8443/` + +**Client Registration Endpoint:** OpenID Connect client registration endpoint. This value is found in your well-known discovery document as `registration_endpoint`. The well-known location URL is typically `https://keycloak:8443/.well-known/openid-configuration` + +**Initial Registration Access Token:** the token that’s used to register new clients, this was generated in the early steps of the guide. + +### Testing the flow + +Now that both Tyk and Keycloak are ready we can test the complete flow. + +1. Click **Developers** under **Portal Management** +2. Click on **Add developer** and create a developer user. + +After the developer is created, open your Developer Portal, click on the **OAuth Clients** navigation bar button and follow the wizard: + +Step 8 + +Click **Create first OAuth Client**. You’ll see your previously created **Keycloak API**, select it and click **Save and continue**. The following screen will require you to enter a client name. It’s also possible to set redirect URLs if you also plan to use this client for other flow types. This setting can be left blank for the purposes of this guide. + +Step 9 + +Once you click **Create**, Tyk will trigger a registration on your IDP and the details of your client will be displayed: + +Step 10 + +If you check the Keycloak dashboard you will see this client too: + +Step 11 + +The next step is to generate a token and use it for accessing your **Keycloak API**. We'll use Postman for this. 
You will need your token URL which is also the well-known URL for your organization. +For this guide we use `https://keycloak:8443/auth/realms/master/protocol/openid-connect/token` + +Your Postman request should contain the following body, where `"client_id"` and `"client_secret"` are the credentials you got from the developer portal: + +Step 12 + +Note that we aren’t using any additional headers for this request, the client credentials are enough. + +Once we get a response from the IDP, we can copy the `"access_token"` and use it to access our **Keycloak API**, this request will be proxied by Tyk: + +Step 13 diff --git a/tyk-developer-portal/tyk-portal-classic/monetise.mdx b/tyk-developer-portal/tyk-portal-classic/monetise.mdx new file mode 100644 index 000000000..452c3d0ee --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/monetise.mdx @@ -0,0 +1,35 @@ +--- +title: "Monetize" +order: 11 +noindex: True +sidebarTitle: "Monetising your APIs" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +Out of the box, the Tyk Developer Portal does not have a billing component, however, this does not mean that it is not possible to enable monetization within a Portal developer access flow. + +### The Developer Key Request Flow + +When a developer enrolls for API access with a Tyk portal system, they will: + +1. Sign up +2. Select a catalog entry to participate in +3. Submit a key request form +4. Receive their token + +With Tyk, it is possible to prevent step 4, which auto-enables the key, and instead have the developer redirected to a third party app. This app can then handle any transactional process such as taking a credit card number or pre-validating the developer, before returning the developer to the Portal. 
+ +When Tyk hands off to the redirected app, it will also add the key request ID to the request, so the application that handles the transaction can then use the Tyk Dashboard REST API to approve the key request (triggering the email that notifies the developer of their token, as well as notifying the calling application of the raw token), closing the loop. + +To enable the developer hand-off in a Tyk Portal, from the **Portal Settings** enable the redirect option: + +Redirect key requests form + +## Example Using Stripe + +In this video, we walk you through setting up Stripe to take payments via your Tyk Developer Portal. + + \ No newline at end of file diff --git a/tyk-developer-portal/tyk-portal-classic/okta-dcr.mdx b/tyk-developer-portal/tyk-portal-classic/okta-dcr.mdx new file mode 100644 index 000000000..82fff33b9 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/okta-dcr.mdx @@ -0,0 +1,172 @@ +--- +title: "Step by step guide using Okta" +order: 2 +noindex: True +sidebarTitle: "Step by step guide using Okta" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +## Introduction + +We are going to walk you through a basic integration of Tyk with Okta using the [OpenID Connect Dynamic Client Registration protocol](https://tools.ietf.org/html/rfc7591). Our current implementation provides support for the client credentials flow with support for JWT. + +The user journey is as follows: + +1. A developer signs up and creates a Dynamic Client Registration provider using the Developer Portal. + +2. Tyk sends the Dynamic Client Registration call to your IDP. The IDP replies with the client ID and secret. + +3. Using that information, the developer (or the application) triggers a call to the token endpoint of the IDP. + +4. The developer (or the application) then triggers a call to Tyk, using the token that was generated by the IDP. Tyk validates this token using the JWKS provided by the IDP. 
+ +### Requirements + +- An OKTA account (a [trial account](https://www.okta.com/free-trial/) should be enough). +- A [Tyk Self Managed installation](/tyk-self-managed/install) (Gateway + Dashboard). + +### Getting started with OKTA + +First sign up to OKTA, the initial screen looks like: + +Step 1 + +The first thing you’ll need for our integration is an API token from OKTA, the OpenID specification also calls this an [Initial Access Token](https://openid.net/specs/openid-connect-registration-1_0.html#Terminology) to differentiate it from other tokens that are used with this protocol. To create this token, click **API** option from the **Security** menu on the navigation bar: + +Step 2 + +From the API section, select the **Tokens** tab and click **Create Token** and enter a name for the token. For this guide we’re calling it "Tyk Integration": + +Step 3 + +Click **Create Token**. Keep it safe as you'll use this token to configure Tyk. + +Next you need to create a scope, from the **Authorization servers** tab in the API section, click **Add Scope**. You need to select the **Set as default scope** option: + +Step 4 + +### Setting up Tyk + +Now you're ready to set up Tyk. For compatibility reasons, check your `tyk_analytics.conf` and make sure that a proper `oauth_redirect_uri_separator` parameter is set. You may use the following value: + +```json + "oauth_redirect_uri_separator": ";", +``` + +Remember to restart the service after applying the above change. + +Now open the Tyk Dashboard and click **APIs** under **System Management**. Create a new API called "OKTA API": + +Step 5 + +Complete first part of the API creation form, then click **Configure API** and set the Authentication mode as in the image below: + +Step 6 + + + +Where do I get the proper JWKS URI for my OKTA environment? + +From the OKTA Dashboard, open the **API** section under **Security**, take the base URL from the default Authorization Server and append the `/v1/keys` suffix, e.g. 
`https://tyk-testing.okta.com/oauth2/default/v1/keys`. + + + +For the **Identity Source** field use `"sub"` and for **Policy Field Name** use `"pol"`. + +1. Click **Save** +2. Select **Policies** under **System Management** +3. Click **Create a Policy** and call it **OKTA Policy**. Use the default values for this policy. +4. In the **Access rights** section, select your previously created **OKTA API**. You will also need to enter an expiration setting for your keys. + +After the policy is created, switch back to the API settings and make sure that the API is using your **OKTA Policy** policy: + +Step 7 + +Now you're ready to add this API to the Developer Portal. +1. Click **Catalog** under **Portal Management** on the navigation menu. +2. Click **Add New API**, enter a name for it and select the newly created policy. Again, you will use **OKTA API**: + +Step 8 + +1. Click **Save** then open the API added again +2. Open the **Settings** tab. +3. In **API Details** select the **Override global settings** option. + + + + + Tyk lets you set global portal settings that apply to **all portal-listed APIs**, in this guide we assume you’re enabling and setting up DCR for a single API. In case you want to enable DCR for all the APIs, you should go to the **Settings** section under **Portal Management**, and in the **API Access** tab you can enter your DCR settings there. + + + +4. Scroll down to the DCR section and enter the following settings: + + +Okta Grant Types + + +**Providers:** Different providers might implement the standard in slightly different ways, Tyk provides a specific driver for each one. For IDPs that aren’t on the list use the "Other" option. For this guide, pick "OKTA". + +**Grant Types:** The grant types that will be used by the client. See the [specification](https://openid.net/specs/openid-connect-registration-1_0.html#rfc.section.2) for more details. 
You need to enter the following grant types: + * Client Credentials + * Implicit + * Authorization Code + +**Token Endpoint Auth Method:** defines the way the client will authenticate against the token endpoint. Use "Client Secret - Post". + +**Response Types:** OAuth 2.0 response types that will be used by the client. Set **Token**. + +**Identity Provider Host:** Base IDP URL, e.g. `https://tyk-testing.okta.com/` + +**Client Registration Endpoint:** OpenID Connect client registration endpoint. The value we use is `https://tyk-testing.okta.com/oauth2/v1/clients` + +This value is found in your well-known discovery document as `registration_endpoint`. The well-known location URL is typically `https://tyk-testing.okta.com/.well-known/openid-configuration` (replace "tyk-testing" with your org.). + +**Initial Registration Access Token:** the token that’s used to register new clients, this was generated in the early steps of the guide. + + + +A note on grant types and response types in OKTA + +It’s important to note that OKTA’s DCR endpoint supports a parameter called `"application_type"`, the application types aren’t standard across all IDPs, while the initial specification mentions `"native"` or `"web"` types, some IDPs implement their own. In the current implementation Tyk supports the usage of the `"web"` application type which is necessary in supporting the client credentials flow that’s described in this guide, as well as others, this is set automatically when OKTA is set as the provider. Currently, the ability to change the application type is available with the Enterprise Developer Portal. + + + +### Testing the flow + +Now that both Tyk and OKTA are ready we can test the complete flow. + +1. Click **Developers** under **Portal Management** +2. Click on **Add developer** and create a developer user. 
+ +After the developer is created, open your Developer Portal, click on the **OAuth Clients** navigation bar button and follow the wizard: + +Step 10 + +Click **Create first OAuth Client**. You’ll see your previously created **OKTA API**, select it and click **Save and continue**. The following screen will require you to enter a client name. It’s also possible to set redirect URLs if you also plan to use this client for other flow types. This setting can be left blank for the purposes of this guide. + +Step 11 + +Once you click **Create**, Tyk will trigger a registration on your IDP and the details of your client will be displayed: + +Step 12 + +If you check the OKTA dashboard you will see this client too: + +Step 13 + +The next step is to generate a token and use it for accessing our **OKTA API**. We'll use Postman for this. You will need your token URL which is also the well-known URL for your organization. +For this guide you'll use `https://[org].okta.com/oauth2/default/v1/token` + +Your Postman request should contain the following body, where `"client_id"` and `"client_secret"` are the credentials you got from the developer portal: + +Step 14 + +Note that we aren’t using any additional header for this request, the client credentials are enough. We’re also passing our previously created `"tyk"` scope as value. 
+ +Once we get a response from the IDP, we can copy the `"access_token"` and use it to access our **OKTA API**, this request will be proxied by Tyk: + +Step 15 diff --git a/tyk-developer-portal/tyk-portal-classic/portal-concepts.mdx b/tyk-developer-portal/tyk-portal-classic/portal-concepts.mdx new file mode 100644 index 000000000..40589d309 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/portal-concepts.mdx @@ -0,0 +1,94 @@ +--- +title: "Portal Concepts" +order: 1 +noindex: True +sidebarTitle: "Portal Concepts" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +## API Catalog + +The API Catalog is a list of APIs that you have published to your portal. + +The API Catalog entry is not a one-to-one map between an API you manage in Tyk, since you might want to compose multiple managed services into a single public-facing API Facade, a catalog entry is actually an entry that maps against a security policy. + +From the API Catalog, a user can either: + +- View the documentation for the API +- Request for a token to the API + +When a developer requests a token, a new Auth token is generated on the linked policy, instead of the actual API, since you may wish to publish multi-tier access to the same API (E.g. Bronze / Silver / Gold). + +## Key Requests + +A key request is a record that is generated when a developer requests an access token for an API published in the API Catalog. The Key request encompasses the following information: + +Read more about them in the [Key Request section](/tyk-developer-portal/tyk-portal-classic/key-requests) + +### Multiple APIs for a single Key Request + +New for v1.9, a developer can now request access to multiple APIs with a single key request. The APIs you group together via a single key should all be of the same authentication type. 
+ +Multiple APIs per Key Request + +To enable this functionality, select **Enable subscribing to multiple APIs with a single key** from the Portal Management Settings. + +Multiple APIs + +### Edit APIs associated with a single Key Request + +New for v1.9.4, if you have **Enable subscribing to multiple APIs with a single key** selected you can edit the APIs associated with the Key. You can perform the following: + +* Remove access to existing APIs +* Subscribe to new APIs (of the same authentication type as the existing ones). + + Edit APIs + + +If a new API requires key approval, the new key request will be generated, and access to this API will be granted after your admin approves it. + + +## Policies + +In the context of the developer portal, a security policy is the main "element" being exposed to public access. The policy is the same as a standard policy, and the policy forms the baseline template that gets used when the portal generates a token for the developer. + +Security policies are used instead of a one-to-one mapping because they encapsulate all the information needed for a public API program: + +1. Rate limits +2. Quota +3. Access Lists (What APIs and which versions are permitted) +4. Granular access (Which methods and paths are allowed, e.g. you may want to only expose read-only access to the portal, so only GET requests are allowed) +5. Multi-policy-management (With a Key, you can assign more than one policy to an API and each policy will have its own counter). + +Within the developer portal admin area, under a developer record, you will see their subscriptions. Those subscriptions represent the tokens they have and their policy level access. It is possible to then "upgrade" or "downgrade" a developer's access without actually managing their token, but just assigning a new policy to that token. + +## Documentation + +Within the portal, documentation is what a developer can use to learn how to access and use your APIs. 
+ +The developer portal supports two types of documentation, and will render them differently: + +1. API Blueprint - this is rendered to HTML templates using Jade and Aglio. +2. Swagger/OpenAPI (OpenAPI 2.0 and 3.0 are supported) - either by pasting your Swagger JSON or YAML content into the code editor, or by linking to any public facing Swagger URL. The URL version can be rendered using [Swagger UI](https://swagger.io/tools/swagger-ui/) which offers a sandbox environment where developers can interact with your API from the browser. + + + + + Support for API Blueprint is being deprecated. See [Importing APIs](/api-management/gateway-config-managing-classic#api-blueprint-is-being-deprecated) for more details. + + + +Within an API Catalog entry, documentation must be attached to the catalog entry for it to be published. + +## Developers + +Within the developer portal, a developer is an end-user that has access to the developer portal section of the portal website. This user is completely separate from Tyk Dashboard users and they do not ever intersect (they are also stored separately). + +A developer record consists of some basic sign-up information and a set of admin-definable fields that get attached to the developer as metadata. + +Within the developer view of the Tyk Dashboard, it is possible to manage all access of a developer, including the access levels of their tokens. + + diff --git a/tyk-developer-portal/tyk-portal-classic/portal-events-notifications.mdx b/tyk-developer-portal/tyk-portal-classic/portal-events-notifications.mdx new file mode 100644 index 000000000..5d0deea16 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/portal-events-notifications.mdx @@ -0,0 +1,111 @@ +--- +title: "Portal events and notifications" +order: 9 +noindex: True +sidebarTitle: "Events and Notifications" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +Tyk enables you to actively monitor both user and organization quotas. 
These active notifications are managed in the same way as webhooks and provide an easy way to notify your stakeholders, your own organization or the API end user when certain thresholds have been reached for their token. + +### Tyk Cloud Users + +Monitors are disabled by default in Tyk Cloud. Portal events are enabled and can be defined by raising a support ticket. + +### How to Enable Monitors + +See [Monitors](/api-management/gateway-events#monitoring-quota-consumption) for details of how to configure quota consumption monitors. + +### Portal Events + +The Tyk Dashboard and the Portal now support email notifications powered by Mandrill, Sendgrid, Mailgun and Amazon SES. + +#### How Email Notifications Work + +If you have enabled email notifications, the Portal will attempt to send notifications regarding a user's sign-up status or key request status to their username email address. These templates can be found in the `portal/email_templates` folder. + +The templates are available as text-based or HTML. See the standard included ones to see the various template fields that can be customized. + +### Extra Dashboard And Portal Events + +The Dashboard and Portal also support a certain level of events that you can use to notify your system of various things that have happened in the Portal. + +To configure them, add an `event_options` section to an Organization when you are creating them. See [Creating an Organization via the Dashboard Admin API](/api-management/dashboard-configuration#create-an-organization) for more details.
+ +Within this object, you can then register webhooks or/and an email address to notify when an event occurs: + +```{.copyWrapper} +event_options: { + api_event: { + webhook: "http://posttestserver.com/post.php?dir=tyk-events", + email: "test@test.com" + }, + key_event: { + webhook: "http://posttestserver.com/post.php?dir=tyk-key-events", + email: "test@test.com" + }, + key_request_event: { + webhook: "http://posttestserver.com/post.php?dir=tyk-key-events", + email: "test@test.com" + } +} +``` + +The following events are supported: + +* `api_event`: When an API is created, updated or deleted. + +* `key_event`: When a key is created, updated or deleted. + +* `key_request_event`: When a Portal key request is created or updated. + +Sample **Webhook** Payload for a **Key Request** Event: +```{.json} +{ + "event": "key_request_event.submitted", + "data": { + "id": "5e543dd0f56e1a4affdd7acd", + "org_id": "5e2743567c1f8800018bdf35", + "for_plan": "5e2744897c1f8800018bdf3b", + "apply_policies": [ + "5e2744897c1f8800018bdf3b" + ], + "by_user": "5e430ef68131890001b83d2e", + "approved": false, + "date_created": "2020-02-24T16:19:12.175113-05:00", + "portal_developer": { + "id": "5e430ef68131890001b83d2e", + "email": "dev@dev.ca", + "date_created": "2020-02-11T15:30:46.003-05:00", + "inactive": false, + "org_id": "5e2743567c1f8800018bdf35", + "keys": { + "6dc2dfc0": [ + "5e431f938131890001b83d30" + ] + }, + "subscriptions": { + "5e431f938131890001b83d30": "6dc2dfc0" + }, + "last_login_date": "2020-02-11T16:43:39.858-05:00" + }, + "catalogue_entry": { + "name":"frontend APIs", + "short_description":"", + "long_description":"", + "show":true, + "api_id":"", + "policy_id":"5e2744897c1f8800018bdf3b", + "documentation":"5e3b477a7c1f8800013603c6", + "version":"v2", + "is_keyless":false, + "config":{ + + } + } + } +} +``` \ No newline at end of file diff --git a/tyk-developer-portal/tyk-portal-classic/portal-oauth-clients.mdx 
b/tyk-developer-portal/tyk-portal-classic/portal-oauth-clients.mdx new file mode 100644 index 000000000..00daaaf86 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/portal-oauth-clients.mdx @@ -0,0 +1,54 @@ +--- +title: "Portal OAuth Clients" +order: 10 +noindex: True +sidebarTitle: "Portal OAuth Clients" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +From Tyk Dashboard v1.8, you can now create and manage OAuth clients from the Developer Portal. + +## Prerequisites + +1. An API created in your Dashboard using Tyk's ability to act as an OAuth provider. You need to have [OAuth 2.0](/api-management/authentication/oauth-2) selected as the Authentication mode. See [Create an API](/api-management/gateway-config-managing-classic#create-an-api) for more details. +2. A Policy created in your Dashboard with the API created above selected in the **Access Rights > Add access rule** drop-down. See [Create a Security Policy](/api-management/gateway-config-managing-classic#secure-an-api) for more details. +3. A Portal Catalog entry for the API created above with the Policy you created selected from the **Available policies** drop-down. See [Create a Portal Entry](/getting-started/tutorials/publish-api) for more details. +4. A developer account created in your Developer Portal. + +## Create the OAuth Client from the Portal + +1. Log in to your Portal: + +Developer Portal Home Screen + +2. Select **OAuth Clients** from the top menu +3. If this is the first OAuth Client you are creating, the screen will be as below: + +Developer OAuth Home Screen + +4. Click **Create first OAuth Client** +5. Hover over the API you added to the Catalog with OAuth Authentication mode from the drop-down list: + +Select API Screen + +6. Click **Select API** +7. Then click **Save and continue**: + +Save + +8. You can now add details about your application, and set the redirect URL to the application.
If you want to use this client for more than one application, you can add other redirect URLs as necessary. +9. Click **Create** + +Create + +10. You need to copy and save the displayed Client Secret, as you will not be able to view it from the Portal again. The secret is stored on the Dashboard and is listed for each developer under the **Portal Management > Developers** menu. + +secret + + +## Revoke OAuth Client Tokens + +See [Revoke OAuth Tokens](/api-management/authentication/oauth-2#revoking-access-tokens) for more details. \ No newline at end of file diff --git a/tyk-developer-portal/tyk-portal-classic/tyk-portal-classic/customise/customise-with-templates.mdx b/tyk-developer-portal/tyk-portal-classic/tyk-portal-classic/customise/customise-with-templates.mdx new file mode 100644 index 000000000..2ce076288 --- /dev/null +++ b/tyk-developer-portal/tyk-portal-classic/tyk-portal-classic/customise/customise-with-templates.mdx @@ -0,0 +1,106 @@ +--- +title: "Customize Page Templates" +order: 2 +noindex: True +sidebarTitle: "Customise Page Templates" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + +The Tyk Developer Portal can be fully customized using templates. The templates for the Portal are only available to Self-Managed users currently. These templates are located in the `/opt/tyk-dashboard/portal` folder of your Tyk installation. + +All templates are based on Twitter Bootstrap and are standard HTML with some Golang Template snippets to handle dynamic content rendering. + + + +The Portal process (`tyk-analytics`) must be restarted for template changes to take effect. This is because the application caches templates on startup. + + + + +### Adding new templates + +The Tyk content editor enables you to specify a template name to use when rendering templates.
two are provided by default: + +* Default Home Page Template +* Default Page Template + +The third option is "Custom" and this allows you to enter a template name into the field editor that will set the template name to use on render. + +To set a new template name up in your Tyk installation, you will need to add the file to the `portal` folder and ensure it starts and ends with the templates directive: + +``` +{{ define "customPage" }} + Provider1[Tyk Dashboard] + Agent --> Provider2[AWS API Gateway] + Agent --> Provider3[Other Providers] + end + + subgraph "Governance Hub" + Hub[Governance Service] --- APIRepo[API Repository] + end + + Agent <-->|gRPC Streams| Hub +``` + +The agent establishes two persistent gRPC streams with the Governance Hub: + +1. **Health Stream**: Sends regular heartbeats to indicate the agent is alive and functioning +2. **Sync Stream**: Used for API synchronization operations + +When multiple agent replicas are deployed with leader election enabled, they use Kubernetes leader election to ensure only one instance actively performs synchronization, while others stand by as hot backups. + +### Synchronization Process + +Synchronization can be triggered in three ways: + +1. **Manual Trigger**: Through the Governance Hub UI or API + + + +2. **Scheduled Sync**: At regular intervals configured in the agent. See [Understanding Scheduled Synchronization](#understanding-scheduled-synchronization). + +3. **Initial Connection**: When an agent first connects to the Governance Hub + +#### Understanding Scheduled Synchronization + +You can configure scheduled synchronization for each API provider using the **Governance Hub API (Hub-side scheduling)**. 
+ +Use the `/api/agents/{id}/sync-jobs` endpoint to configure provider-specific schedules: + +```sh +# Schedule sync for a specific provider +curl -X POST "${GOVERNANCE_URL}/api/agents/${AGENT_ID}/sync-jobs" \ + -H "X-API-Key: ${API_KEY}" \ + -H "Content-Type: application/json" \ + -d '{ + "repeat_every": "12h", + "provider": "tyk-provider", + "start_from": "2023-06-01T12:00:00Z" + }' +``` + +With this approach: +- The Governance Hub's scheduler component manages the schedule +- The Hub initiates synchronization by sending requests to the agent +- It's a "push" model where the Hub tells the agent when to sync +- You can set different schedules for different providers +- Supports Go duration format (e.g., "1h", "12h", "7d") + +During synchronization: + +1. The agent receives a sync request from the hub (or initiates it based on its schedule) +2. The agent queries each configured API provider for APIs +3. The agent processes and normalizes the API definitions +4. The agent streams the API definitions to the hub +5. The hub processes and stores the API definitions +6. The hub reconciles the API inventory, marking missing APIs as deleted + +### Deployment Scenarios + +#### Multi-Provider API Discovery + +Deploy agents to connect to different API providers across your organization, creating a comprehensive API inventory that spans platforms. 
+ +```yaml +# Agent configuration with multiple providers +instances: + - name: "tyk-dashboard" + type: "tyk" + config: + host: "http://tyk-dashboard:3000" + auth: "your-tyk-api-key" + + - name: "aws-us-east" + type: "aws" + config: + accessKeyId: "your-aws-access-key" + accessKeySecret: "your-aws-secret-key" + region: "us-east-1" + + - name: "aws-eu-west" + type: "aws" + config: + accessKeyId: "your-aws-access-key" + accessKeySecret: "your-aws-secret-key" + region: "eu-west-1" +``` + +#### High Availability Agent Deployment + +Deploy multiple agent replicas in Kubernetes to ensure continuous API discovery even if some instances fail. + +```yaml +# Kubernetes deployment with leader election +apiVersion: apps/v1 +kind: Deployment +metadata: + name: governance-agent +spec: + replicas: 3 # Multiple replicas for redundancy + template: + spec: + containers: + - name: agent + env: + - name: POD_NAME # When leader election is enabled, POD_NAME environment variables must be set + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE # When leader election is enabled, POD_NAMESPACE environment variables must be set + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: TYK_AGENT_LEADERELECTION_ENABLED + value: "true" +``` + +## Configuration Options + +### Agent Configuration File + +The agent is configured using a YAML configuration file with the following sections: + +#### Core Configuration + +```yaml +# Governance Dashboard Connection +governanceDashboard: + server: + url: "your-governance-instance.tyk.io:50051" + auth: + token: "your-agent-token" + +# Log level (debug, info, warn, error) +logLevel: info + +# Health probe configuration +healthProbe: + server: + port: 5959 +``` + +#### API Provider Configuration + +```yaml +# API Provider Configurations +instances: + # Tyk Provider + - name: "tyk-provider" + type: "tyk" + config: + host: "http://dashboard:3000" + auth: "your-tyk-api-key" + + # AWS API Gateway Provider + - name: 
"aws-provider" + type: "aws" + config: + accessKeyId: "your-aws-access-key-id" + accessKeySecret: "your-aws-access-key-secret" + region: "us-east-1" + # Optional session token for temporary credentials + sessionToken: "your-aws-session-token" +``` + +#### gRPC Connection (New in v0.2) + +```yaml +rpc: + # Keepalive configures the keepalive settings for the gRPC connection. + keepalive: + # Enabled controls whether keepalive is enabled. + enabled: true + # Time is the duration after which if there are no activities, ping will be sent. + time: 30s + # Timeout is the duration the client waits for a response to a keepalive ping. + timeout: 20s + # PermitWithoutStream if true allows sending pings even without active streams. + permitWithoutStream: true +``` + +#### High Availability Configuration (New in v0.2) + +```yaml +leaderElection: + # Enable or disable leader election + enabled: true + # Name of the Kubernetes lease object used for leader election + leaseName: "governance-agent-lock" + # Namespace where the lease object will be created + # If not specified, the agent's namespace will be used + leaseNamespace: "" + # Duration that non-leader candidates will wait before attempting to acquire leadership + leaseDuration: "15s" + # Duration that the acting leader will retry refreshing leadership before giving up + renewDeadline: "10s" + # Duration the leader elector clients should wait between leadership acquisition attempts + retryPeriod: "2s" +``` + +### Environment Variables + +The agent supports configuration through environment variables: + +| Environment Variable | Description | Default Value | +| :--------------------- | :------------- | :--------------- | +| `TYK_AGENT_LICENSEKEY` | Your Tyk Governance license key | - | +| `TYK_AGENT_LOGLEVEL` | Log level (debug, info, warn, error) | `info` | +| `TYK_AGENT_GOVERNANCEDASHBOARD_SERVER_URL` | The gRPC endpoint URL of the Tyk Governance service | - | +| `TYK_AGENT_GOVERNANCEDASHBOARD_SERVER_TLS_ENABLED` | Enable 
TLS for gRPC connections | `false` | +| `TYK_AGENT_GOVERNANCEDASHBOARD_SERVER_TLS_CACERTPATH` | Path to CA certificate | - | +| `TYK_AGENT_GOVERNANCEDASHBOARD_SERVER_TLS_CLIENTCERTPATH` | Path to client certificate (for mTLS) | - | +| `TYK_AGENT_GOVERNANCEDASHBOARD_SERVER_TLS_CLIENTKEYPATH` | Path to client key (for mTLS) | - | +| `TYK_AGENT_GOVERNANCEDASHBOARD_SERVER_TLS_INSECURESKIPVERIFY` | Skip verification of server certificate | `false` | +| `TYK_AGENT_GOVERNANCEDASHBOARD_AUTH_TOKEN` | Authentication token for the agent | - | +| `TYK_AGENT_HEALTHPROBE_SERVER_PORT` | Port for health probe server | `5959` | + +### gRPC Connection Variables (New in v0.2) + +| Environment Variable | Description | Default Value | +| :--------------------- | :------------- | :--------------- | +|`TYK_AGENT_RPC_KEEPALIVE_ENABLED`|Enables/disables keepalive|`true`| +|`TYK_AGENT_RPC_KEEPALIVE_TIME`|Duration after which ping is sent|`30s`| +|`TYK_AGENT_RPC_KEEPALIVE_TIMEOUT`|Duration client waits for ping response from the server|`20s`| +|`TYK_AGENT_RPC_KEEPALIVE_PERMITWITHOUTSTREAM`|Allows sending pings without active streams|`true`| + +### Leader Election Variables (New in v0.2) + +| Environment Variable | Description | Default Value | +| :--------------------- | :------------- | :--------------- | +|`TYK_AGENT_LEADERELECTION_ENABLED`|Enable Kubernetes leader election|`false`| +|`TYK_AGENT_LEADERELECTION_LEASENAME`|Name of the lease object|`governance-agent-lock`| +|`TYK_AGENT_LEADERELECTION_LEASENAMESPACE`|Namespace for the lease object|Agent's namespace| +|`TYK_AGENT_LEADERELECTION_LEASEDURATION`|Duration for lease|`15s`| +|`TYK_AGENT_LEADERELECTION_RENEWDEADLINE`|Deadline for renewing leadership|`10s`| +|`TYK_AGENT_LEADERELECTION_RETRYPERIOD`|Period between retry attempts|`2s`| + +#### Required Environment Variables for Leader Election + +When leader election is enabled, the following environment variables must also be set: + +- `POD_NAME`: The name of the pod (used as the 
identity for leader election) +- `POD_NAMESPACE`: The namespace of the pod (used for creating the lease object) + +These are typically set automatically when deploying with Kubernetes using the downward API: + +```yaml +env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +``` + +## Use Cases + +### Multi-Region API Discovery + +For organizations with APIs deployed across multiple geographic regions, deploy region-specific agents to efficiently discover and monitor APIs while respecting network boundaries. + +**Implementation:** + +1. Deploy agents in each region where APIs are hosted +2. Configure each agent with the appropriate regional API providers +3. Use descriptive names to identify regional agents +4. Monitor all agents from the central Governance Hub + +**Benefits:** + +- Reduced latency for API discovery operations +- Respect for network boundaries and security zones +- Improved reliability with region-specific agents +- Clear organization of APIs by region + +### Secure Environment Bridging + +For organizations with strict network segmentation, use agents to securely bridge between isolated environments without compromising security boundaries. + +**Implementation:** + +1. Deploy agents in each network segment +2. Configure outbound-only connections from agents to the Governance Hub +3. Use separate agents for production and non-production environments +4. Implement appropriate network security controls around agent traffic + +**Benefits:** + +- Maintain network isolation while enabling governance +- No inbound connections required to sensitive environments +- Granular control over which APIs are discovered +- Separation of concerns between environments + +### Automated API Lifecycle Tracking + +Use scheduled synchronization to automatically track the lifecycle of APIs, including when they're created, updated, or deleted from source providers. 
+ +**Implementation:** + +1. Configure agents with scheduled synchronization +2. Set appropriate sync intervals based on change frequency +3. Use the API Repository to monitor API status +4. Create reports showing API lifecycle events + +**Benefits:** + +- Automatic detection of API changes +- Historical record of API lifecycle events +- Reduced manual tracking effort +- Improved visibility into API landscape evolution + +## Best Practices and Recommendations + +- **Use descriptive agent names** that indicate their purpose and scope +- **Deploy agents close to API providers** to minimize latency and network issues +- **Configure appropriate sync intervals** based on how frequently your APIs change +- **Use leader election for high availability** when deploying multiple agent replicas +- **Monitor agent health regularly** to ensure continuous API discovery +- **Rotate API provider credentials periodically** for better security +- **Use the principle of least privilege** when configuring API provider credentials +- **Start with manual syncs** before enabling scheduled synchronization +- **Implement network security controls** around agent traffic +- **Maintain agent versions** to ensure compatibility with the Governance Hub + +## FAQs + + + +The number of agents depends on your API landscape. Generally, you should consider deploying separate agents for: + +- Different network segments or security zones +- Different geographic regions +- Different environments (production vs. non-production) +- Different API provider types with many APIs + +A single agent can connect to multiple API providers of different types, so you don't necessarily need one agent per provider. + + + +If an agent goes offline, it will be marked as "INACTIVE" in the Governance Hub after missing several heartbeats. APIs discovered by that agent will remain in the repository but won't be updated until the agent reconnects or another agent is configured to discover the same APIs.
+ +When using high availability with leader election, if the leader agent goes offline, another replica will automatically take over the leadership role and continue synchronization operations. + + + +All communication between agents and the Governance Hub is secured using: + +- TLS encryption for all traffic +- JWT token-based authentication +- Regular token validation +- Bidirectional stream validation + +The agent only requires outbound connectivity to the Governance Hub, with no inbound connections required. + + + +Agents require read-only access to API configurations: + +- For Tyk Dashboard: An API key with read access to APIs and policies +- For AWS API Gateway: IAM credentials with permissions to list and get API Gateway resources +- For other providers: Similar read-only access to API configurations + +The agent never requires write access to API providers. + + + +## Troubleshooting + + + +- Check if the agent process is running +- Verify network connectivity to the Governance Hub +- Ensure the agent token is valid and not expired +- Check agent logs for connection errors +- Verify the Governance Hub URL is correct +- Ensure the agent has outbound access to the hub's gRPC port. It is usually 50051 for self-managed instances. For Tyk Cloud managed instance, it is proxied through port 443. 
+ + + +- Verify API provider credentials are correct +- Check agent logs for provider connection errors +- Ensure the provider has APIs configured +- Try triggering a manual sync operation +- Check agent configuration for correct provider URLs +- Verify the agent has network access to the API providers + + + +- Verify leader election is properly configured +- Check if agents are in the same Kubernetes namespace +- Ensure agents are using the same lock name +- Check Kubernetes permissions for leader election +- Verify Kubernetes API access from agent pods +- Check agent logs for leader election messages + + + +- Ensure the agent has been running long enough for a scheduled sync +- Check agent logs for scheduled sync messages +- Try restarting the agent to reset the schedule +- Verify the agent is the leader if using leader election + + + diff --git a/tyk-governance/api-evaluation.mdx b/tyk-governance/api-evaluation.mdx new file mode 100644 index 000000000..8b4c8ec13 --- /dev/null +++ b/tyk-governance/api-evaluation.mdx @@ -0,0 +1,343 @@ +--- +title: "API Evaluation" +description: "Validate API specifications against governance policies before deployment to catch compliance issues early in the development lifecycle and reduce rework." +keywords: "Tyk Governance, API Evaluation, Rulesets, API Validation, Shift-Left Governance" +sidebarTitle: "API Evaluation" +--- + +## Availability + +- Version: Available since v0.2 + +## Overview + +API Evaluation enables you to validate API specifications against governance policies before deployment, without requiring the API to be published or stored in your API Repository. This feature helps you catch compliance issues early in the development lifecycle, reducing rework and accelerating the delivery of high-quality APIs. 
+ +### Key Benefits + +- **Shift-Left Governance**: Catch compliance issues during design and development, not after deployment +- **Reduce Development Cycles**: Identify and fix issues before they reach code review or testing phases +- **Seamless Integration**: Easily incorporate governance checks into CI/CD pipelines and development workflows +- **Detailed Feedback**: Receive precise information about violations with line numbers and remediation guidance +- **No Storage Required**: Validate API specifications without storing them in your API Repository + +### Dependencies + +- Requires Tyk Governance v0.2 or higher +- Requires at least one governance ruleset to be defined + +## Quick Start + +In this tutorial, we'll validate an API specification against a governance ruleset before deployment. + +### Prerequisites + +- Access to Tyk Governance Hub +- A governance ruleset ID +- An API specification to validate (in OpenAPI format) + +### Step-by-Step + +1. **Identify Your Ruleset** + + Navigate to the Rulesets section in your Tyk Governance dashboard and note the ID of the ruleset you want to use for validation. + +2. **Prepare Your API Specification** + + Ensure your API specification is in a valid OpenAPI format (JSON or YAML). + +3. **Make an API Request** + + Use the API Evaluation endpoint to validate your specification: + + ```sh + curl -X POST https://your-governance-instance.tyk.io/api/rulesets/evaluate-spec \ + -H "Content-Type: application/json" \ + -H "X-API-Key: YOUR_API_KEY" \ + -d '{ + "rulesetId": "$RULESET_ID", + "apiSpec": { + "name": "My Test API", + "content": { + "openapi": "3.0.0", + "info": { + "title": "Test API", + "version": "1.0.0" + }, + "paths": { + "/example": { + "get": { + "responses": { + "200": { + "description": "OK" + } + } + } + } + } + } + } + }' + ``` + +4. 
**Review the Results** + + The response will include any violations found, with details about each issue: + + ```json + { + "status": "Success", + "message": "Rule violation found", + "errors": [ + { + "code": "info-contact", + "path": ["info"], + "message": "API must have contact information", + "severity": "error", + "range": { + "start": { "line": 3, "character": 2 }, + "end": { "line": 6, "character": 3 } + }, + "howToFix": "Add contact information to the info section" + } + ] + } + ``` + + If there are no violations found: + + ```json + { + "status" : "Success", + "message" : "No rule violation found", + "errors":[] + } + ``` + +### Validation + +- A successful request with no violations will return an empty errors array +- If violations are found, each will include: + - The rule code that was violated + - The path in the API specification where the violation occurred + - A message explaining the issue + - The severity level (error, warning, info, hint) + - The exact location in the file (line and character) + - Guidance on how to fix the issue (if available) + +## How It Works + +API Evaluation works by sending your API specification to the Tyk Governance Hub, where it's validated against a specified ruleset without being stored in your API Repository. The system applies each rule in the ruleset to your specification and returns detailed results. + +### Integration into Development Workflow + +#### Integrating with CI/CD Pipelines + +API Evaluation can be integrated into your CI/CD pipeline to validate API specifications before they're deployed automatically. This ensures that only compliant APIs make it to production. 
+ +```yaml +# Example GitHub Actions workflow +name: API Governance Check + +on: + pull_request: + paths: + - 'api-specs/**' + +jobs: + validate-api: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Validate API Specification + run: | + SPEC_CONTENT=$(cat api-specs/my-api.yaml | awk '{printf "%s\\n", $0}') + curl -X POST https://your-governance-instance.tyk.io/api/rulesets/evaluate-spec \ + -H "Content-Type: application/json" \ + -H "X-API-Key: ${{ secrets.GOVERNANCE_API_KEY }}" \ + -d "{ + \"rulesetId\": \"your-ruleset-id\", + \"apiSpec\": { + \"name\": \"My API\", + \"content\": \"$SPEC_CONTENT\" + } + }" > validation-results.json + + # Fail if any errors are found + ERROR_COUNT=$(jq '.errors | length' validation-results.json) + if [ $ERROR_COUNT -gt 0 ]; then + echo "API validation failed with $ERROR_COUNT issues:" + jq '.errors' validation-results.json + exit 1 + fi + +``` + +#### Pre-commit Validation + +Developers can validate their API specifications before committing changes, ensuring they meet governance standards from the start. 
+ +```bash +#!/bin/bash +# pre-commit hook for API validation + +# Get the API specification file +SPEC_FILE=$(git diff --cached --name-only | grep -E '\.json$|\.yaml$|\.yml$' | head -1) + +if [ -n "$SPEC_FILE" ]; then + echo "Validating API specification: $SPEC_FILE" + + # Convert the file content to JSON + if [[ $SPEC_FILE == *.yaml || $SPEC_FILE == *.yml ]]; then + SPEC_CONTENT=$(yq eval -o=json $SPEC_FILE) + else + SPEC_CONTENT=$(cat $SPEC_FILE) + fi + + # Validate the specification + RESPONSE=$(curl -s -X POST https://your-governance-instance.tyk.io/api/rulesets/evaluate-spec \ + -H "Content-Type: application/json" \ + -H "X-API-Key: YOUR_API_KEY" \ + -d "{ + \"rulesetId\": \"your-ruleset-id\", + \"apiSpec\": { + \"name\": \"$(basename $SPEC_FILE)\", + \"content\": $SPEC_CONTENT + } + }") + + # Check for errors + ERROR_COUNT=$(echo $RESPONSE | jq '.errors | length') + if [ $ERROR_COUNT -gt 0 ]; then + echo "API validation failed with $ERROR_COUNT issues:" + echo $RESPONSE | jq '.errors' + exit 1 + fi + + echo "API specification is valid!" +fi + +exit 0 +``` + +## Use Cases + +### Validating APIs During Design Phase + +Integrate API Evaluation with design tools to validate specifications during the design phase, before any code is written. + +**Benefits**: + +- Catch issues at the earliest possible stage +- Reduce rework and development cycles +- Ensure designs align with governance standards from the start + +**Implementation**: + +1. Design an API in your preferred tool +2. Export the OpenAPI specification +3. Validate using the API Evaluation endpoint +4. Review and address any issues +5. Repeat until the specification passes validation + +### Automated Testing in Development Workflows + +Incorporate API Evaluation into automated testing workflows to ensure continuous compliance during development. 
+ +**Benefits**: + +- Maintain compliance throughout the development process +- Prevent regression of governance standards +- Provide immediate feedback to developers + +**Implementation**: + +1. Add API validation as a step in your testing pipeline +2. Run validation after any changes to the API specification +3. Fail the build if critical violations are found +4. Generate reports of issues for developers to address + +### Pre-release Validation Gate + +Use API Evaluation as a final check before releasing APIs to production or external consumers. + +**Benefits**: + +- Ensure only compliant APIs are released +- Maintain consistent quality standards +- Reduce security and compliance risks + +**Implementation**: + +1. Add a validation step in your release pipeline +2. Block releases with critical violations +3. Generate compliance reports for audit purposes +4. Track compliance metrics over time + +## Best Practices and Recommendations + +- **Integrate early in development**: Validate specifications before coding begins to avoid costly rework +- **Use appropriate rulesets**: Select rulesets that match the API's purpose and criticality +- **Automate validation**: Incorporate validation into CI/CD pipelines and development workflows +- **Review results carefully**: Understand the context of each violation before fixing +- **Prioritize by severity**: Address errors first, then warnings, then informational issues +- **Track compliance trends**: Monitor how compliance improves over time +- **Update specifications incrementally**: Fix critical issues first, then address less severe ones +- **Document exceptions**: When a rule violation is intentional, document the reason +- **Provide feedback on rules**: Help improve governance rules that generate false positives +- **Use with other governance tools**: Combine with API Repository and Ruleset Management for comprehensive governance + +## FAQs + + + +The `/rulesets/evaluate-spec` endpoint is designed for validating a single 
API specification. For batch validation of multiple specifications, you can make multiple requests or use the `/rulesets/evaluate` endpoint if the APIs are already in your API Repository. + + + +Currently, the API Evaluation feature supports OpenAPI 3.x specifications in both JSON and YAML formats. Support for additional formats is planned for future releases. + + + +Yes, there's a 10MB limit on the size of API specifications that can be evaluated. For very large specifications, we recommend breaking them into smaller, more manageable components. + + + +No, specifications submitted through the API Evaluation endpoint are not stored in your API Repository. They are processed in memory and then discarded, making this feature suitable for validating sensitive or in-development APIs. + + + +## Troubleshooting + + + +- Ensure your request body follows the correct JSON format +- Verify that the `content` field contains a valid OpenAPI specification + +- Check for JSON syntax errors in your request +- Make sure the `rulesetId` is valid and exists in your Governance Hub + + + +- Reduce the size and complexity of your API specification +- Ensure your ruleset doesn't contain overly complex rules +- Break large specifications into smaller components +- Check network connectivity between your client and the Governance Hub + + + +- Review the ruleset being used for evaluation +- Check the specific rule that's generating the violation +- Verify your API specification against the OpenAPI specification +- Consider if the rule needs adjustment for your specific use case + + + +- Verify your API key is valid and has not expired +- Ensure you're including the API key in the correct header ( `X-API-Key` ) +- Check that your user account has permission to access the API Evaluation feature +- Verify you're using the correct Governance Hub URL + + + diff --git a/tyk-governance/api-labeling.mdx b/tyk-governance/api-labeling.mdx new file mode 100644 index 000000000..f53c816d8 --- /dev/null 
+++ b/tyk-governance/api-labeling.mdx @@ -0,0 +1,178 @@ +--- +title: "API Labeling and Categorization" +description: "Organize, classify, and filter your APIs using customizable metadata tags to create a structured taxonomy for your API landscape." +keywords: "Tyk Governance, API Labeling, API Categorization, API Metadata, API Organization" +sidebarTitle: "API Labeling and Categorization" +--- + +## Availability + +- Version: Available since v0.1 + +## Overview + +API Labeling and Categorization enables you to organize, classify, and filter your APIs using customizable metadata tags. This feature allows you to create a structured taxonomy for your API landscape, making it easier to search, filter, and apply governance policies based on business context, technical characteristics, or organizational ownership. + +### Key Benefits + +- Enables structured organization of APIs by business domain, criticality, and other dimensions +- Facilitates efficient search and filtering of APIs in large inventories +- Provides consistent metadata across APIs from different sources +- Supports governance policy application based on API characteristics (Governance Policy feature is coming soon) +- Enables reporting and analytics based on business context (Reporting feature is coming soon) + +## Quick Start + +In this tutorial, we'll explore how to use API labeling to categorize and filter APIs in your organization's API Repository. + +### Prerequisites + +- Access to the Tyk Governance Hub +- Governance Admin access for creating new label definitions (Note: only admin access is available at the moment) + +### Step-by-Step + +1. **Access the API Repository** + + Navigate to the API Repository section in your Tyk Governance dashboard. + +2. **Explore Default Labels** + + Tyk Governance comes with pre-configured default labels such as "Business Domain" and "API Criticality". + +3. 
**Apply Labels to APIs** + + Select an API and click "Edit" to apply or modify labels: + + - Set "Business Domain" to an appropriate value (e.g., "Finance", "Customer", "Product") + - Assign "API Criticality" based on the API's importance (Tier 1 for mission-critical, Tier 2 for important, Tier 3 for non-critical) + - Add any custom labels that your Governance Admin has defined + +4. **Filter APIs Using Labels** + + Use the search and filter functionality to find APIs based on their labels: + + - Filter to show only Tier 1 APIs + - Search for APIs in a specific business domain + - Combine multiple label filters for precise results + +5. **Create a Custom Label (Admin only)** + + Governance Admin users can create custom labels programmatically using the API: + + Example using cURL: + + ```bash + curl -X POST https://your-governance-instance.tyk.io/api/labels/ \ + -H "Content-Type: application/json" \ + -H "X-API-Key: YOUR_ADMIN_TOKEN" \ + -d '{ + "name": "compliance", + "values": ["PCI-DSS", "GDPR", "HIPAA"] + }' + ``` + + A successful request will return a 200 OK status code and the newly created label object: + + ```json + { + "id": "64a1b2c3d4e5f6a7b8c9d0e1", + "name": "compliance", + "values": ["PCI-DSS", "GDPR", "HIPAA"] + } + ``` + + **Notes**: + - The name field is required and must be unique + - The values field is optional. 
If provided, it defines the allowed values for this label + - If values is empty, the label will accept any value (free text) + - Only users with admin privileges can create labels + - Once created, labels can be applied to APIs using the `/api/{api-id}/labels` endpoint + - After creating a custom label, it will be available for selection when labeling APIs, either through the UI or via the API labeling endpoints + +### Validation + +- Labeled APIs will display their labels in the API details view +- Filtering by labels will show only matching APIs +- New custom labels will be available for application to APIs + +## How It Works + +API Labeling and Categorization works through a flexible key-value metadata system that allows both structured and free-form classification of APIs. + +### Labeling System Architecture + +1. **Bootstrap Default Labels**: During initial setup, Tyk Governance creates default label definitions such as "Business Domain" and "API Criticality" +2. **Label Definition**: Each label has: + - A unique key (e.g., "business_domain") + - A display name (e.g., "Business Domain") + - A value type (free text or predefined values) + - Optional predefined values (e.g., "Finance", "HR", "Operations") + +3. **Label Application**: Labels are applied to APIs as key-value pairs: + - Key: The label identifier (e.g., "business_domain") + - Value: The specific value for this API (e.g., "Finance") + +4. **Label Storage**: Labels are stored as metadata with each API in the repository database +5. **Search and Filter**: Tyk Governance indexes labels to enable efficient filtering and searching + +## Use Cases + +### Governance Policy Application + +Apply different governance rules based on API criticality tiers. For example, Tier 1 (mission-critical) APIs might require stricter security controls, more thorough documentation, and formal change management processes. 
+ +### Compliance Management + +Tag APIs with relevant compliance requirements (PCI-DSS, GDPR, HIPAA) to ensure appropriate controls are applied and to facilitate compliance reporting and audits. + +### Team Ownership and Responsibility + +Label APIs by owning team or department to clarify responsibility for maintenance, support, and governance compliance. + +### API Lifecycle Management + +Use labels to indicate lifecycle stage (Development, Testing, Production, Deprecated) to manage API transitions and communicate status to consumers. + +## Best Practices and Recommendations + +- **Establish a clear labeling taxonomy** before implementing across your organization +- **Keep predefined value lists manageable** – too many options create confusion and inconsistency +- **Use hierarchical naming for related labels** (e.g., security.authentication.method, security.data.classification) +- **Document the meaning and intended use** of each label for consistent application +- **Assign label management responsibility** to a specific role or team to maintain consistency +- **Review and update labels periodically** to ensure they remain relevant as your API landscape evolves +- **Include label application in API onboarding workflows** to ensure consistent metadata from the start +- **Use consistent labeling conventions** across all APIs to facilitate effective filtering and governance +- **Combine multiple labels in filters** for more precise API discovery +- **Use criticality and domain labels** as the foundation of your governance strategy + +## FAQs + + + +Yes, Governance Administrators can create custom labels with either free text values or a predefined list of acceptable values. + + + +Labels are structured key-value pairs that can be validated and used for governance, while tags are typically simpler, unstructured text values mainly used for search. 
+ + + +Yes, the discovery process attempts to map source system metadata to corresponding labels in the governance hub where possible. + + + +## Troubleshooting + + +- Ensure a Governance Admin has properly defined the label +- Check that at least one API has been tagged with this label +- Refresh the browser cache if the label was recently added + + + +- For predefined value labels, check that the value you're trying to add is in the allowed list +- Verify you have sufficient permissions to modify the API's labels +- Ensure the label hasn't been deprecated or replaced + diff --git a/tyk-governance/api-repository.mdx b/tyk-governance/api-repository.mdx new file mode 100644 index 000000000..77bcf3be9 --- /dev/null +++ b/tyk-governance/api-repository.mdx @@ -0,0 +1,130 @@ +--- +title: "API Repository" +description: "Learn how Tyk Governance automatically discovers and catalogs APIs across multiple sources to create a comprehensive inventory of all APIs in your organization." +keywords: "Tyk Governance, API Repository, API Discovery, API Inventory" +sidebarTitle: "Federated API Repository" +--- + +## Availability + +- Version: Available since v0.1 + +## Overview + +API Repository automatically discovers and catalogs APIs across multiple sources (Tyk, AWS API Gateway, etc) to create a comprehensive inventory of all APIs in your organization. This feature addresses API sprawl, identifies shadow APIs, and provides complete visibility into your API landscape. + +### Key Benefits + +- Creates a single source of truth for all APIs across the organization +- Identifies security risks from undocumented or unmanaged APIs +- Enables better resource management and prevents duplication +- Provides visibility into API ownership and usage patterns + +### Dependencies + +- Requires the governance agent for API discovery from non-Tyk Cloud-managed control planes and non-Tyk platforms. 
+ +## Quick Start + +In this tutorial, we'll explore how to use the API Repository to view and manage discovered APIs in your organization. + +### Prerequisites + +- Access to the Tyk Governance Hub +- Governance agent deployed and connected to your API providers (non Tyk Cloud sources only) + +For detailed installation and configuration instructions, please refer to the [Installation and Setup](/tyk-governance/installation) page. + +### Step-by-Step + +1. **Access the API Repository** + + Navigate to the API Repository section in your Tyk Governance Hub to view discovered APIs. + +2. **Explore the API inventory** + + The dashboard provides a comprehensive view of all discovered APIs across your organization, with filtering and search capabilities. + + + +3. **Examine API details** + + Click on any API to view detailed information, including specifications, ownership, authentication methods, and governance status. + + + +## How It Works + +The API Repository works by deploying agents that connect to various API sources, extract metadata, and synchronize this information with the central governance hub. Think of it as an automated API census that continuously updates your API inventory. + +### Discovery Process + +1. **Agent Deployment**: Agents are deployed to connect with various API sources. +2. **API Source Connection**: Agents authenticate and connect to configured API sources. +3. **Metadata Extraction**: Agents extract API metadata including routes, authentication methods, and specifications. +4. **Synchronization**: Extracted data is sent to the governance hub through secure gRPC streams. +5. **Inventory Creation**: APIs are cataloged in a centralized repository with relevant metadata. +6. **Classification**: APIs can be tagged and categorized based on extracted and custom metadata. +7. **Continuous Updates**: Regular scans maintain an up-to-date inventory and identify changes. 
+ +## Use Cases + +### Centralizing API Inventory Across Multiple Gateways + +When your organization uses multiple API gateways (Tyk, AWS, etc.), maintaining a single view of all APIs becomes challenging. API Discovery automatically aggregates APIs from all sources into a unified inventory, providing a complete picture of your API landscape without manual tracking. + +### Identifying and Managing Shadow APIs + +Shadow APIs—those created outside official processes—pose security and governance risks. The discovery feature continuously scans your infrastructure to identify undocumented APIs, allowing you to bring them under governance or decommission them as appropriate. + +### Streamlining API Onboarding with Automated Discovery + +For organizations with numerous APIs, manual registration is time-consuming and prone to errors. Automated discovery accelerates the onboarding process by automatically detecting new APIs and pre-populating their metadata, thereby reducing the time required to bring APIs under governance. + +### Tracking API Changes for Compliance and Audit + +When APIs change without proper documentation, it creates compliance risks. The continuous discovery process detects changes to existing APIs, maintaining an accurate, up-to-date inventory that serves as an audit trail for compliance purposes. + +### Enabling API Reuse Through Comprehensive Cataloging + +Developers often recreate APIs because they're unaware of existing ones. A complete API inventory with rich metadata enables developers to discover and reuse existing APIs, reducing duplication and development costs. 
+ +## Best Practices and Recommendations + +- **Configure all relevant API sources** to ensure complete coverage of your API landscape +- **Implement a review process** for newly discovered APIs to ensure proper classification and ownership assignment +- **Integrate discovery with your CI/CD pipeline** to automatically synchronize new APIs as they're deployed +- **Establish clear ownership** for each API to ensure accountability for governance and maintenance + +## FAQs + + +The discovery process uses secure authentication methods for each provider and transmits data via encrypted channels. The agent requires minimal permissions—just enough to read API configurations. + + + +The discovery process is designed to be lightweight and non-intrusive, primarily reading configuration data rather than analyzing traffic, thereby minimizing any performance impact. + + +## Troubleshooting + + + +- Check the agent logs for authentication errors +- Verify the provider configuration in the governance agent config +- Ensure the agent has network access to the API source + + + +- Some API sources may not expose all metadata +- Check if the API definition in the source is complete +- Consider enhancing the API definition at the source + + + +- Verify the governance URL and token in the agent configuration +- Check network connectivity between the agent and governance hub +- Examine the agent logs for specific connection errors + + + diff --git a/tyk-governance/core-concepts.mdx b/tyk-governance/core-concepts.mdx new file mode 100644 index 000000000..5a961d2cc --- /dev/null +++ b/tyk-governance/core-concepts.mdx @@ -0,0 +1,278 @@ +--- +title: "Core Concepts" +description: "Detailed explanation of key technical concepts that form the foundation of Tyk Governance, including federated API management, governance rulesets, and the technical architecture." 
+keywords: "Tyk Governance, API Governance, Federated API Management, Governance Concepts" +sidebarTitle: "Core Concepts" +--- + +This section provides a detailed explanation of the key technical concepts that form the foundation of Tyk Governance. + +## What is Tyk Governance? + +Tyk Governance is a comprehensive API Governance Hub designed to provide centralized visibility, control, and policy enforcement across distributed API ecosystems. It enables organizations to establish and maintain consistent standards, security practices, and compliance requirements across multiple API gateways and management platforms. + +At its core, Tyk Governance is a federated control plane that sits above your existing API infrastructure, regardless of whether you're using Tyk exclusively or a mix of different API management solutions. It collects, analyzes, and governs API definitions from various sources, ensuring they adhere to your organization's standards and best practices. + +Tyk API Governance Architecture + +## Federated API Management + +Federated API management refers to the practice of managing APIs across multiple, distributed platforms while maintaining consistent governance, visibility, and control. + +Organizations struggle with API sprawl, with many lacking visibility into their total number of APIs. Multiple gateways across teams create security vulnerabilities, governance gaps, and inefficiency. This "API debt" results in inconsistent security protocols, missed reuse opportunities, and increased risk from shadow APIs. Large enterprises need a solution that balances centralized governance with team autonomy—enabling visibility across different gateways while allowing teams to use their preferred tools. The ideal approach provides unified oversight without forcing consolidation, ensuring compliance while preserving innovation and operational independence. + +### Tyk Governance Solutions + +Tyk Governance addresses these challenges through: + +1. 
**Unified API Repository**: A central inventory of all APIs across different providers. +2. **Cross-Platform Policy Enforcement**: Consistent application of governance policies regardless of the underlying API provider. +3. **Automated Compliance Checking**: Continuous validation of APIs against organizational standards and regulatory requirements. +4. **Maturity Assessment**: Evaluation and scoring of APIs based on design, security, documentation, and performance criteria. +5. **Centralized Reporting**: Comprehensive visibility into API compliance and governance status. + +## Governance Rulesets + +A governance ruleset in Tyk Governance is a set of rules and standards that APIs must adhere to. These rulesets define the requirements for API design, security, documentation, and operational characteristics. + +### Ruleset Components + +1. **Rules**: Individual checks that validate specific aspects of an API definition. +2. **Severity Levels**: Categorization of rules by importance (error, warning, info). +3. **Validation Functions**: The specific logic used to evaluate API definitions against rules. +4. **Remediation Guidance**: Instructions on how to fix issues when rules are violated. + +### Spectral Ruleset Compatibility + +Tyk Governance rulesets are compatible with the [Spectral ruleset](https://meta.stoplight.io/docs/spectral/01baf06bdd05a-rulesets) format, a widely adopted API linting and governance standard. This compatibility offers several advantages: + +1. **Familiar Format**: Teams already using Spectral can easily migrate their existing rulesets. +2. **Ecosystem Integration**: Leverage the broader ecosystem of pre-built Spectral rules. +3. **Extensibility**: Create custom rules using the same format and functions as Spectral. +4. **IDE Integration**: Use existing Spectral plugins for popular code editors. + +The Spectral-compatible format allows for declarative rule definitions with given/then patterns, custom functions, and detailed error messaging. 
+ +### Basic Ruleset Examples + +```yaml +# Security ruleset requiring HTTPS +owasp-security-hosts-https-oas3: + description: All server interactions MUST use the https protocol + severity: error + then: + function: owaspHostsHttps + +# Rate limiting ruleset +rate-limit-exists: + description: Ensure rateLimit exists under upstream + severity: error + given: "$['x-tyk-api-gateway'].upstream" + then: + - field: rateLimit + function: truthy +``` + +Rulesets can be customized to meet organizational needs and evolve as API best practices and security requirements change. + +## Supported API Providers + +Tyk Governance is designed to work with a wide range of API management platforms, allowing organizations to maintain governance regardless of their existing API infrastructure. + +### API Provider Compatibility + +| API Provider | Tested Version | Supported API Types | Supported Features | +| :--------------- | :-------------- | :------------- | :------------------------------------------------- | +| Tyk Dashboard | 5.3+ | Tyk OAS | Complete integration with all governance features | +| AWS API Gateway | All | Rest APIs | API definition export, OAS schema export | +| Azure | - | - | Coming Soon | +| Kong | - | - | Coming Soon | +| WSO2 | - | - | Coming Soon | + +### Integration Capabilities + +Tyk Governance integrates with these providers through specialized agents that: + +1. Connect to the platform's management APIs +2. Extract API definitions and configurations +3. Convert proprietary formats to OpenAPI Specification (OAS) +4. Apply Tyk-specific extensions where applicable +5. Synchronize definitions with the central governance repository + +### Future API Provider Support + +The Tyk Governance roadmap includes plans to expand support to additional platforms and API Types. + +## How It Works + +Tyk Governance operates through a distributed architecture that combines a centralized cloud-hosted governance service with distributed agents that run in your environments. 
+ +How Tyk API Governance Works + +### Technical Architecture + +```mermaid +flowchart LR + subgraph "Tyk Cloud" + GS["Governance Service"] --- DB[(Database)] + GS --- RE["Rule Engine"] + GS <--> A4["Agent 4"] --- P4["Cloud Control Plane"] + end + + subgraph "Customer Environment" + A1["Agent 1"] --- P1["Tyk Dashboard (Self-Managed)"] + A2["Agent 2"] --- P2["AWS API Gateway"] + end + + A1 <-->|"TLS + Auth"| GS + A2 <-->|"TLS + Auth"| GS +``` + +### Hosted Service Model + +The Governance Core is a Tyk Cloud hosted and managed service, providing several benefits: + +1. **Zero Infrastructure Overhead**: No need to deploy and maintain governance infrastructure. +2. **Automatic Updates**: Always access the latest features and security patches. +3. **Scalable Performance**: Handles growing API ecosystems without additional configuration. +4. **High Availability**: Built-in redundancy and failover capabilities. + +### Customer-Hosted Agents + +While the core service is cloud-hosted, customers can host their own agents within their environments: + +1. **Credential Isolation**: Tyk Governance never directly accesses your API providers; all credentials remain within your environment. +2. **Network Security**: The agent requires accepting inbound traffic from the cloud-based governance dashboard. All communication between agents and the dashboard is secured via TLS encryption. +3. **Deployment Flexibility**: Deploy agents in any environment where they can access your API platforms. +4. **Lightweight Footprint**: Agents have minimal resource requirements and can run in containers or VMs. 
+ +### Process Sequence + +```mermaid +sequenceDiagram + participant Agent + participant Governance + participant RuleEngine + + Agent->>Governance: Register with governance hub + Governance->>Agent: Issue authentication token + Governance->>Agent: Send sync request + Agent->>Agent: Get APIs from API providers + Agent->>Governance: Stream API definitions + Governance->>Governance: Store in repository + Governance->>RuleEngine: Validate against rules + RuleEngine->>Governance: Return validation results + Governance->>Governance: Generate reports +``` + +### Synchronization Mechanisms + +Tyk Governance uses a secure bidirectional streaming protocol for efficient synchronization: + +1. **Registration**: Agents register with the Governance Hub and establish a secure connection. +2. **Heartbeat**: Agents maintain a health check stream to indicate their status. +3. **Sync Request**: The Governance Hub can trigger a sync operation on demand or on schedule. +4. **Streaming Response**: Agents stream API definitions back to the governance hub as they are extracted. +5. **Incremental Updates**: Only changed APIs are synchronized to minimize network traffic. + +### Security Measures + +The synchronization between agents and the Governance service includes multiple security layers: + +1. **TLS Encryption**: All communications are encrypted using TLS 1.2+ to prevent eavesdropping. +2. **Authentication Tokens**: Agents authenticate using secure tokens that can be rotated and revoked. +3. **Minimal Privilege**: Agents use read-only access to API platforms whenever possible. +4. **Data Minimization**: Only API definitions and metadata are transmitted, not actual API traffic or payloads. +5. **Audit Logging**: All synchronization activities are logged for security monitoring. + +### Data Exchange + +The information exchanged between agents and the Governance service includes: + +1. 
**From Agent to Governance**: + - API definitions in OpenAPI format + - API metadata (name, version, endpoints, security schemes) + - Provider-specific configuration converted to standard formats + - Agent status and capability information + - Sync operation status and results + +2. **From Governance to Agent**: + - Sync requests and configuration + - Authentication tokens and renewal information + +Notably, the following are NOT transmitted: + +- API keys or credentials for accessing APIs +- Actual API request/response payloads +- Customer data processed by APIs +- Internal network information beyond what's in API definitions + +## Glossary of Terms + +### Agent + +A component that connects to API Providers (Tyk, AWS API Gateway, etc.) to extract and sync API definitions. + +### API Maturity + +A measure of how well an API adheres to best practices in design, security, documentation, and performance. + +### API Provider + +A system or platform where APIs are hosted or managed, which Tyk Governance can discover and monitor. Examples include Tyk Dashboard and AWS API Gateway. + +### API Repository + +A federated catalog that aggregates APIs from multiple API providers, providing a centralized view of all APIs within an organization. + +### Federated API Management + +An approach to managing APIs across multiple platforms and environments while maintaining centralized governance. + +### Label + +A key-value pair assigned to APIs or API providers for categorization and governance purposes. Examples include `domain:storefront`, `environment:production`, or `pii:true`. + +### Ruleset + +A collection of governance rules that can be applied to APIs to enforce best practices and compliance requirements. + +### Rule + +A specific condition that can be evaluated against APIs to ensure they meet governance standards. Rules include severity levels, messages, and descriptions. 
+ +### Ruleset Template + +A predefined ruleset containing common governance rules that can be applied as a starting point for governance policies. + +### Governance Report + +A summary of API compliance with governance rules, identifying violations and suggesting remediations. + +### Violation + +An instance where an API fails to meet defined governance standards, categorized by severity level. + +### Compliance + +The degree to which an API adheres to defined governance policies and standards. + +### Remediation + +The structured process of addressing and resolving API governance violations. + +### Remediation Priority + +Indicates how urgently an API issue should be addressed based on its risk level and potential impact. This priority helps teams focus their efforts on the most critical issues first. + +### Risk Level + +A summary metric that reflects API governance compliance across multiple APIs, considering their adherence to selected governance rulesets. + +### Sync + +The process of extracting API definitions from management platforms and updating the governance repository. + +### Tyk-OAS Governance Extensions + +Tyk-specific extensions to the OpenAPI Specification that enable advanced governance features. \ No newline at end of file diff --git a/tyk-governance/governance-rulesets.mdx b/tyk-governance/governance-rulesets.mdx new file mode 100644 index 000000000..e507bf965 --- /dev/null +++ b/tyk-governance/governance-rulesets.mdx @@ -0,0 +1,404 @@ +--- +title: "Governance Rulesets" +description: "Define, manage, and enforce API standards across your organization through customizable rules that act as executable policies for API governance requirements." 
+keywords: "Tyk Governance, Rulesets, API Standards, Governance Policies, API Compliance" +sidebarTitle: "Governance Rulesets" +--- + +## Availability + +- Version: Available since v0.2 + +## Overview + +Governance Rulesets enable you to define, manage, and enforce API standards across your organization through customizable rules. These rulesets act as executable policies that define your organization's API governance requirements, helping you establish consistent standards for security, design, and documentation. + +### Key Benefits + +- **Standardize API Development**: Define consistent patterns and practices for all APIs +- **Centralize Governance Policies**: Maintain standards in a single location accessible to all teams +- **Customize to Your Needs**: Create organization-specific rules or use pre-built templates +- **Evolve Standards Gradually**: Adjust rule severity and scope as your governance program matures +- **Share Knowledge**: Embed best practices and remediation guidance directly in rules + +### Dependencies + +- Requires Tyk Governance v0.2 or higher + +## Quick Start + +In this tutorial, we'll create a simple governance ruleset that can be used to validate APIs. + +### Prerequisites + +- Access to Tyk Governance Hub + +### Step-by-Step + +1. **Access the Rulesets Section** + + Navigate to the Rulesets section in your Tyk Governance dashboard. + +2. **Create a New Ruleset** + + Click the "Create new ruleset" button to create a new ruleset. + + + +3. **Choose a Template** + + Select "Start from Template" and choose the "vacuum-owasp" template. + + + +4. **Customize Your Ruleset** + + Review the pre-configured rules. You can enable/disable specific rules or adjust their severity levels. + + + + Then, provide a name and description for your ruleset. + + + +5. **Save Your Ruleset** + + Click **Finish** to create your new ruleset. + +6. **View Your Ruleset** + + Your new ruleset will appear in the rulesets list. 
Click on it to view details and manage individual rules. + +### Validation + +- Successful ruleset creation will be confirmed with a success message +- The ruleset will appear in your rulesets list +- You can now use this ruleset to [evaluate APIs](/tyk-governance/api-evaluation) + +## How It Works + +Governance Rulesets use a powerful rule engine based on the Spectral format to define standards for API specifications. Each rule consists of a selector that identifies parts of the API specification to evaluate, a function that performs the evaluation, and metadata that provides context and remediation guidance. + +### Rule Structure + +A typical rule in a ruleset includes: + +- **Given**: A JSONPath expression that selects parts of the API specification +- **Then**: Functions to apply to the selected parts +- **Severity**: The importance level (error, warn, info, hint) +- **Message**: A description of what the rule checks +- **HowToFix**: Guidance on resolving any violations + +When you create a ruleset, you're defining a collection of these rules that work together to enforce your governance standards. + +### Example Rulesets + +#### Security Standards Ruleset + +Create rulesets that define security requirements for APIs, such as authentication requirements, secure endpoints, and protection against common vulnerabilities. + +```yaml +security-auth-required: + description: APIs must require authentication + severity: error + given: $.paths.*.* + then: + field: security + function: truthy + howToFix: "Add a security requirement to this operation" +``` + +#### API Design Standards Ruleset + +Define rules that enforce naming conventions, URL patterns, and response structures to maintain consistency across your API portfolio. 
+ +```yaml +path-case-convention: + description: Path segments must use kebab-case + severity: warn + given: $.paths + then: + field: "@key" + function: pattern + functionOptions: + match: "^\/([a-z0-9-]+|{[a-zA-Z0-9_]+})(\/{[a-zA-Z0-9_]+}|\/[a-z0-9-]+)*$" + howToFix: "Rename path segments to use kebab-case (lowercase with hyphens)" +``` + +#### Documentation Standards Ruleset + +Create rules that check for complete and accurate documentation, including descriptions, examples, and response schemas. + +```yaml +operation-description: + description: All operations must have descriptions + severity: warn + given: $.paths.*.* + then: + field: description + function: truthy + howToFix: "Add a meaningful description to this operation" +``` + +## Manage Rulesets + +### Creating Rulesets + +Rulesets define governance standards and ensure API compliance with security, performance, and reliability requirements. You can create rulesets through the Governance UI or programmatically via the API. + +#### Using the UI + +The Governance UI provides a user-friendly interface for creating rulesets: + +1. Navigate to the Rulesets section +2. Click **Create new ruleset** +3. Choose how to create your ruleset (import from file, paste definition, or start from template) +4. Provide basic information (name, description) +5. Save your ruleset + +#### Using the API + +You can also create rulesets programmatically using the API. 
+ +**Creating a Ruleset with JSON Payload** + +```sh +curl -X POST https://your-governance-instance.tyk.io/api/rulesets \ + -H "Content-Type: application/json" \ + -H "X-API-Key: YOUR_API_KEY" \ + -d '{ + "metadata": { + "name": "Security Standards", + "description": "Security rules for all APIs", + "active": true + }, + "ruleset": { + "rules": { + "security-auth-required": { + "description": "APIs must require authentication", + "severity": "error", + "given": "$.paths.*.*", + "then": { + "field": "security", + "function": "truthy" + }, + "howToFix": "Add a security requirement to this operation" + } + } + } + }' +``` + +**Creating Rulesets from Files** + +For more complex rulesets or when you maintain your rulesets as files in your development environment, you can create rulesets directly from files using a multipart form request. + +To create a ruleset from a file, you need to send a multipart form request with two key components: + +1. `metadata`: JSON object containing ruleset metadata (name, description, etc.) +2. 
`ruleset`: The ruleset definition file content (in YAML or JSON format) + +Here's how to create a ruleset from a YAML file using curl: + +```sh +curl -X POST https://your-governance-instance.tyk.io/api/rulesets \ + -H "X-API-Key: YOUR_API_KEY" \ + -F "metadata={\"name\":\"API Security Ruleset\",\"description\":\"Enforces API security best practices\",\"active\":true}" \ + -F "ruleset=@/path/to/your/ruleset.yaml" +``` + +### Managing Rulesets + +Once created, rulesets can be managed through the UI or API: + +#### Viewing Rulesets + +- Navigate to the Rulesets section to see all rulesets +- Click on a ruleset to view its details and rules +- Use search to find specific rulesets + +#### Editing Rulesets + +- From the ruleset details page, click **Configure ruleset** +- Modify ruleset metadata or individual rules +- Save your changes + +{/* #### Deleting Rulesets + +- From the ruleset details page, click "Delete" +- Confirm the deletion */} + +#### Using Templates + +- When creating a new ruleset, select **Start from Template** +- Choose from pre-built templates for common standards +- Customize the template to meet your specific needs + +### Testing Rulesets Against APIs + +After creating your ruleset, you'll want to test it against your APIs to ensure it correctly identifies compliance issues. You can test a ruleset through the Governance UI: + +1. Navigate to the Ruleset Details Page + + - Go to the **Rulesets** section and select the ruleset you want to test + +2. Run an Evaluation + + - In the ruleset details page, locate the "**Test ruleset**" section + - Select an API from the dropdown menu + - Click the "**Run ruleset**" button + + + +3. 
Review Results + + - The evaluation results will display any rule violations found in the API + - Results are categorized by severity (High, Medium, Low) + - Click "View issue info" on any violation to see detailed information, including: + - The specific rule that was violated + - The affected area in the API specification + - Guidance on how to fix the issue + + + + + +## Understanding Remediation Priority + +In Tyk Governance, "Remediation Priority" indicates the urgency with which an API issue should be addressed, based on its risk level and potential impact. This priority helps teams focus their efforts on the most critical issues first. + +### Severity Mapping + +Remediation priority is directly derived from the severity level defined in the rule. When a rule violation is detected during evaluation, its severity level is mapped to a corresponding remediation priority: + +| Severity Level | Remediation Priority | Visual Indicator | +| :---------------- | :---------------------- | :------------------ | +| error | High | Red pill | +| warn | Medium | Yellow/orange pill | +| info | Low | Green pill | + +## Use Cases + +### Establishing Tiered Governance Standards + +Create different rulesets for different API tiers based on criticality, allowing for appropriate governance without over-restricting less critical APIs. + +**Implementation:** + +1. Create a "Tier 1" ruleset with strict security, design, and documentation rules for mission-critical APIs +2. Create a "Tier 2" ruleset with moderate requirements for important but less critical APIs +3. Create a "Tier 3" ruleset with basic requirements for internal or non-critical APIs +4. 
Apply these rulesets selectively based on API classification + +**Benefits:** + +- Appropriate governance based on API importance +- More efficient use of development resources +- Clear expectations for different types of APIs + +### Implementing Industry-Specific Standards + +Create rulesets that enforce industry-specific regulations and best practices for APIs in regulated sectors. + +**Implementation:** + +1. Identify relevant industry standards (e.g., FAPI for financial services, HIPAA for healthcare) +2. Create rulesets that codify these standards as executable rules +3. Include detailed remediation guidance specific to the industry context +4. Apply these rulesets to APIs in the relevant domains + +**Benefits:** + +- Ensure compliance with industry regulations +- Reduce audit preparation time +- Standardize compliance approaches across teams + +### Evolving Governance Standards Over Time + +Use rulesets to gradually implement and evolve governance standards as your organization's API program matures. + +**Implementation:** + +1. Start with a basic ruleset focusing on critical security and fundamental design principles +2. Gradually add more rules as teams become familiar with the standards +3. Adjust severity levels over time (e.g., start as warnings, later promote to errors) +4. 
Incorporate feedback from development teams to refine rules + +**Benefits:** + +- Avoid overwhelming teams with too many rules at once +- Build governance maturity incrementally +- Gain buy-in through collaborative evolution + +## Best Practices and Recommendations + +- **Start with templates** for common standards like OWASP or OpenAPI best practices +- **Customize gradually** by adding organization-specific rules over time +- **Use appropriate severity levels** - reserve "error" for critical issues that must be fixed +- **Include clear remediation guidance** in the "howToFix" field for each rule +- **Group related rules** into focused rulesets (security, design, documentation) +- **Review and update rulesets regularly** as standards evolve +- **Collect feedback from developers** on rule clarity and usefulness +- **Document the purpose** of each ruleset for better organizational understanding +- **Maintain version control** for rulesets as they evolve +- **Assign ownership** to specific individuals or teams for each ruleset + +## FAQs + + + +Tyk Governance supports Spectral-compatible rulesets in both YAML and JSON formats. This makes it compatible with existing Spectral rulesets and allows for easy migration from other tools. + + + +Currently, Tyk Governance supports the standard functions provided by the Spectral/Vacuum engine. Custom functions are planned for future releases. + + + +There's no hard limit on the number of rules in a ruleset, but performance may degrade with very large rulesets (100+ rules). We recommend organizing related rules into separate rulesets for better manageability and performance. + + + +Yes, you can import existing Spectral rulesets in YAML or JSON format. This allows you to leverage your existing governance rules in Tyk Governance. 
+ + + +Once you've created rulesets, you can use them to validate APIs through the [API Evaluation](/tyk-governance/api-evaluation) feature, which allows you to check API specifications against your governance standards. + + + +## Troubleshooting + + + +- Verify the ruleset is in valid YAML or JSON format +- Check that all required fields are present (given, then, severity) +- Ensure JSONPath expressions are valid +- Look for syntax errors in function options +- Try importing a smaller portion of the ruleset to identify problematic rules + + + +- Check that the rule definition follows the correct format +- Verify that the rule wasn't disabled during import +- Ensure the rule has a unique name within the ruleset +- Try adding the rule manually if it was part of an import + + + +- Test your JSONPath expression with a sample API specification +- Verify the syntax follows JSONPath standards +- Check for typos or missing elements in the path +- Consider simplifying complex expressions +- Use online JSONPath evaluators to debug expressions + + + +- Ensure you have the necessary permissions +- Check for validation errors in the ruleset definition +- Verify you're clicking the final save button after making changes +- Try refreshing the page and making changes again +- Check browser console for any JavaScript errors + + + diff --git a/tyk-governance/installation.mdx b/tyk-governance/installation.mdx new file mode 100644 index 000000000..38ec8660f --- /dev/null +++ b/tyk-governance/installation.mdx @@ -0,0 +1,534 @@ +--- +title: "Installation and Setup" +description: "Step-by-step instructions for installing and configuring Tyk Governance, including cloud-hosted options and deploying agents in your environment." +keywords: "Tyk Governance, Installation, Configuration, Agent Setup, Deployment" +sidebarTitle: "Installation" +--- + +This section moves from concepts to hands-on implementation, providing the practical steps needed to start with Tyk Governance. 
+ +## Prerequisites + +Before beginning the installation and setup process for Tyk Governance, ensure your environment meets the following requirements: + +### License Requirements +- Valid Tyk license with Governance feature enabled + +### System Requirements + +**For Tyk Governance Hub:** +- No local system requirements as Tyk Governance is fully hosted and managed by Tyk in the cloud + +**For Tyk Governance Agent:** +- Minimum 1 CPU core +- 512MB RAM +- 1GB available disk space +- Linux-based operating system (Ubuntu 18.04+, CentOS 7+, or equivalent) +- Docker (if using containerized deployment) +- Kubernetes (optional, for orchestrated deployments) + +### Permission Requirements + +**For Agent Installation:** +- Read access to your API providers (Tyk, AWS API Gateway, etc.) +- Ability to create and manage containers or services in your environment +- Network configuration permissions to establish outbound connections +- Permission to create and manage secrets for storing API credentials + +### Network Requirements + +**For Tyk Governance Agent:** +- Inbound access from the Tyk Governance Hub (default 50051 for gRPC) +- Outbound access to your API providers +- Outbound HTTPS (port 443) access to the Tyk Governance Hub +- If API Provider gateways run on different networks, network routes must allow the agent to communicate with those networks + +## System Architecture + +Tyk Governance follows a cloud-hosted service model with customer-deployed agents, creating a secure and flexible architecture that respects your network boundaries while providing centralized governance. 
+ +### High-Level Architecture + +```mermaid +flowchart LR + subgraph "Tyk Cloud" + GS["Governance Service"] --- DB[(Database)] + GS --- RE["Rule Engine"] + GS <--> A4["Agent 3"] --- P3["Cloud Control Plane"] + end + + subgraph "Customer Environment" + A1["Agent 1"] --- P1["Tyk Dashboard (Self-Managed)"] + A2["Agent 2"] --- P2["AWS API Gateway"] + end + + A1 <-->|"TLS + Auth"| GS + A2 <-->|"TLS + Auth"| GS +``` + +### Deployment Models + +#### Tyk Cloud with Automatic Agent + +```mermaid +flowchart LR + subgraph "Tyk Cloud" + GS["Governance Service"] + CP["Cloud Control Plane"] + AG["Auto-deployed Agent"] + + GS --- AG + AG --- CP + end + + User["User"] --> GS +``` + +**When to use this model:** +- You exclusively use Tyk Cloud for API management +- You want the simplest possible setup with minimal configuration +- You don't have any APIs on other platforms that need governance + +#### Tyk Cloud with Customer-Deployed Agents + +```mermaid +flowchart LR + subgraph "Tyk Cloud" + GS["Governance Service"] + end + + subgraph "Customer Environment" + A1["Agent"] --- TG["Tyk Dashboard (Self-Managed)"] + A2["Agent"] --- AWS["AWS API Gateway"] + end + + A1 <--> GS + A2 <--> GS + + User["User"] --> GS +``` + +**When to use this model:** +- You use self-managed Tyk deployments (not Tyk Cloud) +- You use AWS API Gateway or other supported providers +- You need to govern APIs across providers that aren't in Tyk Cloud + +#### Hybrid Deployment + +```mermaid +flowchart LR + subgraph "Tyk Cloud" + GS["Governance Service"] + CP["Cloud Control Plane"] + AG["Auto-deployed Agent"] + + GS --- AG + AG --- CP + end + + subgraph "Customer Environment" + A2["Agent"] --- AWS["AWS API Gateway"] + end + + A2 <--> GS + + User["User"] --> GS +``` + +**When to use this model:** +- You use a combination of Tyk Cloud and other API platforms +- You have a mix of cloud and on-premises API deployments +- You need comprehensive governance across your entire API ecosystem + +## Installation + +The 
installation process for Tyk Governance varies depending on whether you're an existing Tyk Cloud customer and which deployment model you use. + +### Requesting Access to Tyk Governance + +1. **Contact Tyk for Access** + - Reach out to your Tyk Account Manager or visit [tyk.io/contact-book-a-demo](https://tyk.io/contact-book-a-demo/) + - Specify that you're interested in access to Tyk Governance + - Provide information about your current API management environment + +2. **Receive Access Credentials** + - After your request is processed, you'll receive an email with: + - URL to access the Tyk Governance Hub + - Admin credentials for initial login + - Instructions for next steps + +3. **Initial Login** + - Navigate to the provided Governance Hub URL + - Enter the admin credentials from the email + - You'll be prompted to change your password on first login + +### Enabling Governance Feature for Cloud Control Planes + +For existing Tyk Cloud managed control planes, enabling governance is straightforward: + +1. **Log in to Tyk Cloud Dashboard** + - Navigate to your Tyk Cloud dashboard + - Ensure you have administrative privileges + +2. **Access Control Plane Settings** + - Select the Control Plane you want to enable governance for + - Click on "Edit Details" button + +3. **Enable Governance Feature** + - Locate the "Governance Agent" toggle + - Enable the feature + - Save your changes + +4. 
**Verification** + - An agent will be automatically deployed for your Tyk Control Plane + - You can now access the Governance dashboard via "Governance" in the Cloud UI sidebar + +### Installing a Local Agent + +For environments where you need to install agents manually (non-Tyk platforms or on-premises deployments), follow these steps: + +**Prerequisites for Agent Installation:** +- Access to the Governance Hub to generate agent tokens +- Network connectivity between the agent and both the Governance Hub and your API provider +- Docker or Kubernetes for container-based deployment (recommended) + +#### Generate Agent Token from Governance Hub UI + +1. From the Agents page, click the **New agent** button in the top-right corner. + + + +2. In the New agent form, enter: + + - **Name**: A descriptive name for the agent (required) + - **Description**: Details about the agent's purpose or location (required) + + Click **Create agent** to save the new agent. + + + +3. Click "Generate new access token" + + + +4. Use the copy icon to copy the token to your clipboard + + + + + +#### Generate Agent Token using API + +You can also use API to create a token. After receiving your Governance Hub credentials, follow these steps: + +1. **Obtain an API Key**: + - Log in to the Governance Hub using the credentials provided in your welcome email + - Check your Access key under the "Settings > User Profile" section + + + +2. 
**Create an Agent using the API**: + + ```bash + # Replace these values with your actual information + GOVERNANCE_URL="https://your-governance-instance.tyk.io" + API_KEY="your-access-key" + AGENT_NAME="My AWS Agent (US)" + + # Create agent first + curl -s -X POST --location "${GOVERNANCE_URL}/api/agents/" \ + -H "X-API-Key: ${API_KEY}" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "'"${AGENT_NAME}"'" + }' + ``` + + Example response that shows an agent is created in INACTIVE state: + + ```json + { + "id": "a51d9bd0-bafe-4749-8285-e18641b151f2", + "name": "My AWS agent (US)", + "description": "", + "last_heartbeat": "0001-01-01T00:00:00Z", + "status": "inactive", + "providers": null, + "token": "", + "version": "" + } + ``` + + ```bash + # Extract agent ID from response + AGENT_ID="a51d9bd0-bafe-4749-8285-e18641b151f2" + ``` + +3. **Generate an Agent Token using the API**: + + Now you can generate an access token for the agent. + + ```bash + # API call to create an agent token + curl -X POST "${GOVERNANCE_URL}/api/auth/token/" \ + -H "X-API-Key: ${API_KEY}" \ + -H "Content-Type: application/json" \ + -d '{ + "agent_id": "'"${AGENT_ID}"'" + }' + ``` + + Example response: + + ```json + { + "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + } + ``` + +4. 
**Save the token securely**: + + - Copy the `token` value from the response + - Store it securely, as you'll need it for agent configuration + - Note: This token cannot be retrieved later, so make sure to save it + +#### Prepare Configuration + +Create a configuration file named `agent-config.yaml` with the following structure: + +```yaml +#============================================================================== +# Tyk Governance Agent Configuration +#============================================================================== + +# Your Tyk Governance license key - required for agent authentication +# This is provided by Tyk when you subscribe to the Governance service +licenseKey: "your-tyk-governance-license-key" + +# Configuration for connecting to the Tyk Governance dashboard/service +governanceDashboard: + server: + # The gRPC endpoint URL of the Tyk Governance service + # Format: hostname:port (without protocol) + # This is in the format of prefixing "grpc-" to your Governance Hub URL. 
+ url: "grpc-your-governance-instance.tyk.io:443" + + auth: + # Authentication token for this agent + # Generated via API call to /auth/token endpoint + # This token identifies and authorizes this specific agent + token: "my-agent-token" + +#============================================================================== +# API Provider Configurations +#============================================================================== +# List of API providers this agent will connect to +# Each agent can connect to multiple providers of different types +instances: + #-------------------------------------------------------------------------- + # Tyk Provider Configuration + #-------------------------------------------------------------------------- + - name: "tyk-provider" # Descriptive name for this provider instance + type: "tyk" # Provider type: must be "tyk" for Tyk Dashboard + config: + # The URL of your Tyk Dashboard + # For Kubernetes deployments, this might be an internal service URL + host: "http://dashboard-svc-tyk-stack-tyk-dashboard.tyk.svc.cluster.local:3000" + + # API key with read access to the Tyk Dashboard + # Can be obtained in Tyk Dashboard under "User" > "User Details": "Tyk Dashboard API Access Credentials" + # Requires read permissions for APIs and policies + auth: "your-auth-key" + + #-------------------------------------------------------------------------- + # AWS API Gateway Provider Configuration + #-------------------------------------------------------------------------- + - name: "aws-provider" # Descriptive name for this AWS API Gateway instance + type: "aws" # Provider type: must be "aws" for AWS API Gateway + config: + # AWS IAM credentials with permissions to list and get API Gateway resources + # Recommended: Use an IAM role with minimal required permissions + accessKeyId: "your-aws-access-key-id" + accessKeySecret: "your-aws-access-key-secret" + + # AWS region where your API Gateway APIs are deployed + # Example: us-east-1, eu-west-1, 
ap-southeast-2, etc. + region: "us-east-1" + + # Optional: Temporary session token if using temporary credentials + # Required only when using AWS STS temporary credentials + sessionToken: "your-aws-session-token" + +#============================================================================== +# Agent Settings +#============================================================================== + +# Log level controls verbosity of agent logs +# Options: debug, info, warn, error +# Recommended: info for production, debug for troubleshooting +logLevel: debug + +# Health probe configuration for monitoring agent health +# Used by container orchestration systems like Kubernetes +healthProbe: + server: + # Port on which the health probe server will listen + # Ensure this port is not used by other services + port: 5959 +``` + +#### Deploy the Agent + +**Docker Deployment:** + +```bash +# Replace it with your Tyk Governance license key +LICENSE_KEY="tyk-governance-license-key" + +# Replace with an available version tag +VERSION="latest" + +docker run -d --name tyk-governance-agent \ + -v $(pwd)/agent-config.yaml:/app/config.yaml \ + -e TYK_AGENT_LICENSEKEY="$LICENSE_KEY" \ + tykio/governance-agent:$VERSION +``` + +**Kubernetes Deployment:** + +1. Create a Kubernetes secret for the configuration: + +```bash +kubectl create secret generic agent-config \ + --from-file=config.yaml=./agent-config.yaml \ + -n your-namespace +``` + +2. 
Apply the following manifest: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: governance-agent + namespace: your-namespace # Replace with your namespace +spec: + replicas: 1 + selector: + matchLabels: + app: tyk-governance-agent + template: + metadata: + labels: + app: tyk-governance-agent + spec: + containers: + - name: agent + image: tykio/governance-agent:latest # Replace with an available version tag + env: + - name: TYK_AGENT_LICENSEKEY + value: your-governance-license #Replace with your license key + ports: + - name: health + containerPort: 5959 + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: health + readinessProbe: + httpGet: + path: /live + port: health + volumeMounts: + - mountPath: /app/config.yaml + name: agent-config + subPath: config.yaml + volumes: + - name: agent-config + secret: + secretName: agent-config + items: + - key: config.yaml + path: config.yaml +``` + +Apply with: + +```bash +kubectl apply -f agent-deployment.yaml +``` + +#### Verify Agent Connection + +1. Check if the agent is running properly: + + ```bash + # For Docker + docker logs tyk-governance-agent + + # For Kubernetes + kubectl logs -l app=tyk-governance-agent -n your-namespace + ``` + + 2. Look for log messages indicating a successful connection: + + ``` + Starting license validation... + License validated successfully. Valid till: ... + starting agent + agent started successfully + waiting agent to establish health check + starting health probes HTTP server","addr":":5959 + authenticated and established health stream + health check established, waiting for sync stream + agent registered successfully and established sync stream with governance dashboard + waiting for sync requests from the dashboard + ``` + +#### Trigger Initial Sync + +1. In the Governance Hub, navigate to "API Repository" +2. Click the "ReSync" button to initiate synchronisation + + + +3. Monitor the sync progress in the UI or refresh the page manually. 
+ +## Examples + +The following examples demonstrate common deployment scenarios and configurations for Tyk Governance. + +### Example 1: Tyk Cloud with Automatic Agent + +This is the simplest deployment model for existing Tyk Cloud customers. + +**Configuration Steps:** + +1. Request access to Tyk Governance as described in [Requesting Access to Tyk Governance](#requesting-access-to-tyk-governance) +2. Enable the Governance feature in Tyk Cloud Control Plane as described in [Enabling Governance Feature for Cloud Control Planes](#enabling-governance-feature-for-cloud-control-planes) +3. Wait for automatic agent deployment +4. Access the Governance Hub from the Cloud UI sidebar +5. Navigate to "API Repository" to view your automatically discovered APIs +6. Trigger "ReSync" to pull updates from the control planes + +**Expected Outcome:** +- All APIs from your Tyk Control Plane will be automatically discovered and displayed in the API Repository + +### Example 2: Multi-Platform Governance with Custom Agents + +This example demonstrates how to set up governance across multiple API providers. + +**Configuration Steps:** + +1. Request access to Tyk Governance as described in [Requesting Access to Tyk Governance](#requesting-access-to-tyk-governance) +2. Generate agent tokens for each provider as described in [Installing a Local Agent](#installing-a-local-agent) +3. Create configuration files for each agent +4. Deploy each agent using Docker or Kubernetes as described in [Installing a Local Agent](#installing-a-local-agent) +5. Verify agent connections +6. Access the Governance Hub with the provided URL +7. Navigate to "API Repository" to view your automatically discovered APIs +8. 
Trigger "ReSync" to pull updates from all agents + +**Expected Outcome:** +- APIs from all providers will be discovered and displayed in a unified repository diff --git a/tyk-governance/overview.mdx b/tyk-governance/overview.mdx new file mode 100644 index 000000000..deaf0032e --- /dev/null +++ b/tyk-governance/overview.mdx @@ -0,0 +1,66 @@ +--- +title: "Tyk Governance Overview" +description: "Introduction to Tyk Governance, a universal API governance hub that enables organizations to establish, enforce, and monitor governance policies across multiple API platforms and gateways." +keywords: "Tyk Governance, API Governance, API Management" +sidebarTitle: "Overview" +--- + +## Overview + +Tyk Governance is a universal API governance hub that enables organizations to establish, enforce, and monitor governance policies across multiple API platforms and gateways. It solves the challenge of fragmented API management by providing a centralized approach to governance, regardless of where your APIs are hosted or which technologies they use. + +In today's complex API ecosystems, organizations struggle with inconsistent standards, security vulnerabilities, and compliance gaps across different API platforms. Tyk Governance bridges these gaps by creating a unified governance layer that works seamlessly with Tyk and extends to third-party API platforms, such as AWS API Gateway. 
+ +Tyk Governance provides centralized visibility and governance across different API platforms + +## Key Benefits + +* **Universal Governance** - Define and enforce consistent policies across multiple API platforms and styles (REST, GraphQL, event-driven) from a single control plane +* **Reduced Duplication** - Identify redundant or shadow APIs across different departments, reducing maintenance costs and security risks +* **Shift-Left Governance** - Catch governance violations during design and development, not after deployment, reducing rework by up to 60% +* **Collaborative Improvement** - Enable teams to work together with shared visibility and clear ownership of APIs across the organization +* **Measurable API Maturity** - Track and improve API quality with quantifiable metrics across technical excellence, business impact, and developer experience + +## Who Should Use Tyk Governance + +Tyk Governance transforms API governance from a fragmented, post-deployment concern into a proactive, continuous, and scalable process across your entire API ecosystem. + +```mermaid +flowchart LR + A[Enterprise Architects and Security Leads] -->|Define Policies| B[Tyk Governance] + C[Platform Engineers] -->|Integrate Tools| B + B -->|Provide Feedback| D[API Developers] + D -->|Create Compliant APIs| B + B -->|Compliance Reporting| A[Enterprise Architects and Security Leads] +``` + +### Enterprise Architects & Security Leads + +Enterprise architects and security leads use Tyk Governance to establish organization-wide standards and ensure strategic alignment of the API program.
They benefit from: + +* Centralized visibility across all API platforms +* Comprehensive compliance reporting +* The ability to define tiered governance policies based on API criticality +* Pre-built governance templates aligned with industry standards + +**Example:** An enterprise architect at a financial services company uses Tyk Governance to ensure all customer-facing APIs comply with security and regulatory requirements, while allowing internal APIs to follow a lighter governance model. + +### Platform Engineers + +Platform engineers leverage Tyk Governance to build and maintain internal developer platforms that streamline API development. They value: + +* Seamless integration of governance into CI/CD pipelines +* Self-service tools that empower developers +* Automated API discovery with scheduled synchronization + +**Example:** A platform engineer integrates Tyk Governance into Tyk Dashboard and other API platforms, providing API templates that automatically incorporate security best practices and compliance requirements. + +### API Developers + +API developers rely on Tyk Governance to design and implement APIs that meet organizational standards from day one. They appreciate: + +* Clear guidance on governance requirements +* Real-time feedback during development +* Reduced rework and faster release cycles + +**Example:** An API developer receives immediate feedback that their new payment API is missing required rate-limiting policies, allowing them to fix the issue before submitting for review. \ No newline at end of file diff --git a/tyk-identity-broker/tib-rest-api.mdx b/tyk-identity-broker/tib-rest-api.mdx new file mode 100644 index 000000000..a4b46a607 --- /dev/null +++ b/tyk-identity-broker/tib-rest-api.mdx @@ -0,0 +1,275 @@ +--- +title: "TIB REST API" +description: "Tyk Identity Broker (TIB) REST API, including endpoints for managing identity profiles and configurations." 
+order: 0 +sidebarTitle: "Tyk Identity Broker" +--- + +The Tyk Identity Broker (TIB) has an API to allow policies to be created, updated, removed and listed for programmatic and automated access. TIB also has a "flush" feature that enables you to flush the current configuration to disk for use when the client starts again. + +TIB does not store profiles in a shared store, so if you have multiple TIB instances, they need to be configured individually (for now). Since we don't expect TIB stores to change often, this is acceptable. + +Starting from Tyk Dashboard 3, TIB is built-in to the dashboard. TIB endpoints are exposed as part of dashboard API on the `/api/tib/` prefix. So if in the guide below external TIB API endpoint is `/api/profiles` the similar endpoint on the dashboard API will be `/api/tib/profiles`. + + +## List Profiles + +```{.copyWrapper} +GET /api/profiles/ +Authorization: test-secret + +{ + "Status": "ok", + "ID": "", + "Data": [ + { + "ActionType": "GenerateTemporaryAuthToken", + "ID": "11", + "IdentityHandlerConfig": { + "DashboardCredential": "822f2b1c75dc4a4a522944caa757976a", + "DisableOneTokenPerAPI": false, + "TokenAuth": { + "BaseAPIID": "e1d21f942ec746ed416ab97fe1bf07e8" + } + }, + "MatchedPolicyID": "5654566b30c55e3904000003", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "ExrtactUserNameFromBasicAuthHeader": true, + "OKCode": 200, + "OKRegex": "origin", + "OKResponse": "ewogICJvcmlnaW4iOiAiNjIuMjMyLjExNC4yNTAsIDE3OC42Mi4xMS42MiwgMTc4LjYyLjExLjYyIgp9Cg==", + "TargetHost": "http://sharrow.tyk.io/ba-1/" + }, + "ProviderConstraints": { + "Domain": "", + "Group": "" + }, + "ProviderName": "ProxyProvider", + "ReturnURL": "", + "Type": "passthrough" + }, + { + "ActionType": "GenerateOAuthTokenForClient", + "ID": "6", + "IdentityHandlerConfig": { + "DashboardCredential": "{DASHBAORD-API-ID}", + "DisableOneTokenPerAPI": false, + "OAuth": { + "APIListenPath": "{API-LISTEN-PATH}", + "BaseAPIID": "{BASE-API-ID}", + "ClientId": 
"{TYK-OAUTH-CLIENT-ID}", + "RedirectURI": "http://{APP-DOMAIN}:{PORT}/{AUTH-SUCCESS-PATH}", + "ResponseType": "token", + "Secret": "{TYK-OAUTH-CLIENT-SECRET}" + } + }, + "MatchedPolicyID": "POLICY-ID", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "FailureRedirect": "http://{APP-DOMAIN}:{PORT}/failure", + "LDAPAttributes": [], + "LDAPUseSSL": false, + "LDAPPort": "389", + "LDAPServer": "localhost", + "LDAPUserDN": "cn=*USERNAME*,cn=dashboard,ou=Group,dc=ldap,dc=tyk-ldap-test,dc=com" + } + "ProviderName": "ADProvider", + "ReturnURL": "", + "Type": "passthrough" + } + ] +} +``` + +## Add Profile + +### Sample Request + +```{.copyWrapper} +POST /api/profiles/{id} +Authorization: test-secret + +{ + "ActionType": "GenerateTemporaryAuthToken", + "ID": "11", + "IdentityHandlerConfig": { + "DashboardCredential": "822f2b1c75dc4a4a522944caa757976a", + "DisableOneTokenPerAPI": false, + "TokenAuth": { + "BaseAPIID": "e1d21f942ec746ed416ab97fe1bf07e8" + } + }, + "MatchedPolicyID": "5654566b30c55e3904000003", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "ExrtactUserNameFromBasicAuthHeader": true, + "OKCode": 200, + "OKRegex": "origin", + "OKResponse": "ewogICJvcmlnaW4iOiAiNjIuMjMyLjExNC4yNTAsIDE3OC42Mi4xMS42MiwgMTc4LjYyLjExLjYyIgp9Cg==", + "TargetHost": "http://sharrow.tyk.io/ba-1/" + }, + "ProviderConstraints": { + "Domain": "", + "Group": "" + }, + "ProviderName": "ProxyProvider", + "ReturnURL": "", + "Type": "passthrough" +} +``` + +### Sample Response + +``` +{ + "Status": "ok", + "ID": "11", + "Data": { + "ID": "11", + "OrgID": "53ac07777cbb8c2d53000002", + "ActionType": "GenerateTemporaryAuthToken", + "MatchedPolicyID": "5654566b30c55e3904000003", + "Type": "passthrough", + "ProviderName": "ProxyProvider", + "ProviderConfig": { + "ExrtactUserNameFromBasicAuthHeader": true, + "OKCode": 200, + "OKRegex": "origin", + "OKResponse": "ewogICJvcmlnaW4iOiAiNjIuMjMyLjExNC4yNTAsIDE3OC42Mi4xMS42MiwgMTc4LjYyLjExLjYyIgp9Cg==", + "TargetHost": 
"http://sharrow.tyk.io/ba-1/" + }, + "IdentityHandlerConfig": { + "DashboardCredential": "822f2b1c75dc4a4a522944caa757976a", + "DisableOneTokenPerAPI": false, + "TokenAuth": { + "BaseAPIID": "e1d21f942ec746ed416ab97fe1bf07e8" + } + }, + "ProviderConstraints": { + "Domain": "", + "Group": "" + }, + "ReturnURL": "" + } +} +``` + +## Update Profile + +### Sample Request + +```{.copyWrapper} +PUT /api/profiles/{id} +Authorization: test-secret + +{ + "ActionType": "GenerateTemporaryAuthToken", + "ID": "11", + "IdentityHandlerConfig": { + "DashboardCredential": "822f2b1c75dc4a4a522944caa757976a", + "DisableOneTokenPerAPI": false, + "TokenAuth": { + "BaseAPIID": "e1d21f942ec746ed416ab97fe1bf07e8" + } + }, + "MatchedPolicyID": "5654566b30c55e3904000003", + "OrgID": "53ac07777cbb8c2d53000002", + "ProviderConfig": { + "ExrtactUserNameFromBasicAuthHeader": true, + "OKCode": 200, + "OKRegex": "origin", + "OKResponse": "ewogICJvcmlnaW4iOiAiNjIuMjMyLjExNC4yNTAsIDE3OC42Mi4xMS42MiwgMTc4LjYyLjExLjYyIgp9Cg==", + "TargetHost": "http://sharrow.tyk.io/ba-1/" + }, + "ProviderConstraints": { + "Domain": "", + "Group": "" + }, + "ProviderName": "ProxyProvider", + "ReturnURL": "", + "Type": "passthrough" +} +``` + +### Sample Response + +``` +{ + "Status": "ok", + "ID": "11", + "Data": { + "ID": "11", + "OrgID": "53ac07777cbb8c2d53000002", + "ActionType": "GenerateTemporaryAuthToken", + "MatchedPolicyID": "5654566b30c55e3904000003", + "Type": "passthrough", + "ProviderName": "ProxyProvider", + "ProviderConfig": { + "ExrtactUserNameFromBasicAuthHeader": true, + "OKCode": 200, + "OKRegex": "origin", + "OKResponse": "ewogICJvcmlnaW4iOiAiNjIuMjMyLjExNC4yNTAsIDE3OC42Mi4xMS42MiwgMTc4LjYyLjExLjYyIgp9Cg==", + "TargetHost": "http://sharrow.tyk.io/ba-1/" + }, + "IdentityHandlerConfig": { + "DashboardCredential": "822f2b1c75dc4a4a522944caa757976a", + "DisableOneTokenPerAPI": false, + "TokenAuth": { + "BaseAPIID": "e1d21f942ec746ed416ab97fe1bf07e8" + } + }, + "ProviderConstraints": { + "Domain": "", + 
"Group": "" + }, + "ReturnURL": "" + } +} +``` + +## Delete Profile + +### Sample Request + +```{.copyWrapper} +Delete /api/profiles/{id} +Authorization: test-secret + +[emtpy body] + +``` + +### Sample Response + +``` +{ + "Status": "ok", + "ID": "200", + "Data": {} +} +``` + +## Save Profiles to Disk + +### Sample Request + +```{.copyWrapper} +POST /api/profiles/save +Authorization: test-secret +[empty body] +``` + +### Sample Response + +``` +{ + "Status": "ok", + "ID": "", + "Data": {} +} +``` + +## Outcome + +The existing `profiles.json` file will be backed up to a new file, and the current profiles data in memory will be flushed to disk as the new `profiles.json` file. Backups are time stamped (e.g. `profiles_backup_1452677499.json`). diff --git a/tyk-mdcb-api.mdx b/tyk-mdcb-api.mdx new file mode 100644 index 000000000..ad5d0b809 --- /dev/null +++ b/tyk-mdcb-api.mdx @@ -0,0 +1,11 @@ +--- +title: "Tyk MDCB API" +description: "Tyk MDCB API documentation. This page provides details on how to use the Tyk Multi Data Center Bridge (MDCB) API for monitoring connected Data Planes and accessing diagnostic data." +keywords: "OpenAPI Spec, OpenAPI Specification, OAS, REST, Tyk MDCB OpenAPI Spec, Tyk MDCB OAS, MDCB API REST" +order: 3 +sidebarTitle: "Overview" +--- + +This API provides operations for monitoring Data Planes connected to MDCB and accessing diagnostic data. +It includes endpoints for retrieving connected data plane details, performing health checks, +and accessing Go's built-in pprof diagnostics for advanced performance profiling. \ No newline at end of file diff --git a/tyk-multi-data-centre/mdcb-configuration-options.mdx b/tyk-multi-data-centre/mdcb-configuration-options.mdx new file mode 100644 index 000000000..32c58d1a5 --- /dev/null +++ b/tyk-multi-data-centre/mdcb-configuration-options.mdx @@ -0,0 +1,36 @@ +--- +title: "MDCB Configuration options" +description: "Each of the config options that are available when deploying MDCB." 
+keywords: "MDCB, configuration options, MDCB configuration options" +order: 3 +sidebarTitle: "Multi Data Center Bridge" +--- + +import MdcbConfig from '/snippets/mdcb-config.mdx'; + +## Tyk MDCB Configuration + +The Tyk MDCB server is configured primarily via the `tyk_sink.conf` file, this file resides in `/opt/tyk-sink` on most systems, but can also live anywhere and be directly targeted with the `-c` flag. + +### Environment Variables + +Environment variables (env var) can be used to override the settings defined in the configuration file. Where an environment variable is specified, its value will take precedence over the value in the configuration file. + +### Default Ports + +| Application | Port | +| :------------------------- | :---------------- | +|MongoDB | 27017 | +|Redis | 6379 | +|**Tyk Dashboard** | | +|Developer Portal | 3000 | +|Admin Dashboard | 3000 | +|Admin Dashboard API | 3000 | +|**Tyk Gateway** | | +|Management API | 8080 | +|**MDCB** | | +|RPC services | 9090 | +|HTTP endpoints | 8181 | + + + diff --git a/tyk-open-source.mdx b/tyk-open-source.mdx new file mode 100644 index 000000000..3aa22f0fc --- /dev/null +++ b/tyk-open-source.mdx @@ -0,0 +1,28 @@ +--- +title: "Tyk Open Source" +description: "This page serves as a comprehensive guide to Tyk Open Source" +keywords: "installation, migration, open source" +sidebarTitle: "Overview" +--- + +## What is Tyk Open Source + +Open source is at the heart of what we do. Anything that is API Gateway-related lives in the Gateway, or is critical for the Gateway to work is open and freely available via our [Github](https://github.com/TykTechnologies/tyk). + +The Tyk Gateway is fully open-source. It's all the same Gateway that's used by you (the community!), by our enterprise products, as well as our SaaS. 
+ +Our commitment to open source also delivers a host of benefits for our users: sign up for free with Tyk, receive securely packaged open source packages, get started guides, access to our community and all of the latest open source information. + + + +Tyk OSS, Tyk Open Source, Tyk Gateway, Tyk CE + + + +OSS-Guide + +## What Does Tyk Open Source Include? + +import OssProductListInclude from '/snippets/oss-product-list-include.mdx'; + + diff --git a/tyk-oss-gateway.mdx b/tyk-oss-gateway.mdx new file mode 100644 index 000000000..addcbdcc9 --- /dev/null +++ b/tyk-oss-gateway.mdx @@ -0,0 +1,16 @@ +--- +title: "Tyk Gateway Open Source (OSS)" +description: "Overview of Tyk Gateway features and deployment options." +order: 1 +sidebarTitle: "Tyk Gateway" +--- + +import TykGatewayFeaturesInclude from '/snippets/tyk-gateway-features-include.mdx'; + +## What is the Tyk Gateway? + + + +## Deployment Options + +Refer the [deployment options](/apim) page. \ No newline at end of file diff --git a/tyk-oss-gateway/configuration.mdx b/tyk-oss-gateway/configuration.mdx new file mode 100644 index 000000000..8cbdf5366 --- /dev/null +++ b/tyk-oss-gateway/configuration.mdx @@ -0,0 +1,26 @@ +--- +title: "Tyk Gateway Configuration Options" +description: "Configuration options and environment variables for Tyk Gateway." +order: 1 +sidebarTitle: "Gateway" +--- + +import GatewayConfig from '/snippets/gateway-config.mdx'; + +You can use environment variables to override the config file for the Tyk Gateway. The Gateway configuration file can be found in the `tyk-gateway` folder and by default is called `tyk.conf`, though it can be renamed and specified using the `--conf` flag. Environment variables are created from the dot notation versions of the JSON objects contained with the config files. +To understand how the environment variables notation works, see [Environment Variables](/tyk-oss-gateway/configuration). + +All the Gateway environment variables have the prefix `TYK_GW_`. 
The environment variables will take precedence over the values in the configuration file. + +### tyk lint + +In **v2.4** we have added a new `tyk lint` command which will validate your `tyk.conf` file and validate it for syntax correctness, misspelled attribute names or format of values. The Syntax can be: + +`tyk lint` or `tyk --conf=path lint` + +If `--conf` is not used, the first of the following paths to exist is used: + +`./tyk.conf` +`/etc/tyk/tyk.conf` + + diff --git a/tyk-overview.mdx b/tyk-overview.mdx new file mode 100644 index 000000000..dc0ac27ae --- /dev/null +++ b/tyk-overview.mdx @@ -0,0 +1,65 @@ +--- +title: "Tyk Overview" +description: "Explaining Tyk at a high level" +keywords: "Tyk API Management, Getting Started, Tutorials" +order: 5 +sidebarTitle: "Tyk Overview" +--- + +APIs are central to enabling software integration, data exchange, and automation. However, as organizations scale their API ecosystems, they face mounting challenges around security, reliability, and performance. Tyk exists to simplify and strengthen this process. With a focus on efficient, secure, and scalable API management, Tyk provides a powerful solution for companies looking to streamline API operations, enforce robust security standards, and gain deep visibility into their API usage. + +## Why Tyk Exists: The Need for API Management +The demand for APIs has exploded over the last decade, with companies using them to enable everything from mobile apps and IoT devices to microservices architectures and third-party integrations. But with this growth come significant challenges: + +- **Security Risks**: Exposing services through APIs introduces new security vulnerabilities that need constant management and monitoring. +- **Scalability**: As usage grows, APIs need to be resilient, able to handle high traffic, and scalable across global regions. 
+- **Complexity in Integration**: Integrating various backend services, identity providers, and front-end applications can become an overwhelming task. +- **Monitoring and Performance**: API performance monitoring, traffic management, and analytics are crucial to optimize API usage and provide reliable service. + +Tyk exists to address these challenges by providing an API management platform that’s secure, scalable, flexible, and easy to use. With Tyk, organizations can confidently manage the entire lifecycle of their APIs, from initial design to deployment and ongoing monitoring. + +## What Problem Does Tyk Solve? + +Tyk is designed to solve several critical issues that organizations face with APIs: + +1. **Unified API Management** + Tyk centralizes all aspects of API management, offering tools for routing, load balancing, security, and performance. This unified approach helps teams streamline API operations and reduce operational overhead. + +2. **Enhanced Security and Compliance** + APIs are vulnerable to numerous security threats. Tyk addresses these concerns by supporting a wide array of security protocols, including OAuth2.0, JWT, HMAC, and OpenID Connect. Additionally, Tyk enables organizations to enforce fine-grained access control policies, rate limiting, and quotas to safeguard API access. + +3. **Scalability for High-Volume Traffic** + Tyk provides a high-performance API gateway that can handle substantial traffic loads while maintaining low latency, ensuring that APIs can scale as demand increases. Tyk’s Multi Data Centre Bridge (MDCB) further enhances scalability by distributing traffic across multiple regions, providing high availability and low latency globally. + +4. **Seamless Integration and Flexibility** + Tyk’s open-source architecture and compatibility with Kubernetes, Docker, and cloud platforms make it easy to integrate within existing infrastructures. 
With Tyk, teams can operate in hybrid or multi-cloud environments, deploy APIs as Kubernetes-native resources, and leverage CI/CD pipelines for seamless updates. + +5. **Developer and Consumer Enablement** + Through the Tyk Developer Portal, developers can discover and access APIs easily, enabling faster adoption and integration. With detailed documentation, developer self-service features, and API analytics, Tyk empowers both API providers and consumers to make the most of their API ecosystem. + +## How Tyk’s Components Work Together + +Tyk offers a comprehensive suite of components designed to address every aspect of the API lifecycle: + +- **[Tyk Gateway](/tyk-oss-gateway)**: The core of Tyk’s platform, providing high-performance API routing, traffic management, and security. +- **[Tyk Dashboard](/api-management/dashboard-configuration)**: A graphical control panel that simplifies API management, configuration, and monitoring. +- **[Tyk Developer Portal](/portal/overview/intro)**: A self-service portal that enables developers to access, understand, and integrate with APIs. +- **[Tyk Multi Data Centre Bridge (MDCB)](/api-management/mdcb)**: Allows centralized control over APIs distributed across multiple data centers or cloud regions. +- **[Tyk Pump](/api-management/tyk-pump)**: Collects and streams analytics from the Tyk Gateway to various storage backends for performance monitoring and reporting. +- **[Tyk Operator](/api-management/automations/operator#what-is-tyk-operator)**: Kubernetes-native API management that allows teams to manage APIs as Kubernetes resources. +- **[Tyk Streams](/api-management/event-driven-apis#)**: Enables real-time data streaming and push-based communication for applications requiring live data. +- **[Tyk Sync](/api-management/automations/sync)**: Synchronizes API configurations across environments, supporting DevOps practices and CI/CD workflows. 
+- **[Tyk Identity Broker](/api-management/external-service-integration#what-is-tyk-identity-broker-tib)**: Integrates with external identity providers for single sign-on (SSO) and centralized identity management. +- **[Tyk Helm Charts](/product-stack/tyk-charts/overview)**: Simplifies the deployment of Tyk components within Kubernetes environments. +- **[Universal Data Graph](/api-management/data-graph#overview)**: Provides a single GraphQL endpoint that aggregates data from multiple sources, simplifying access to complex data. + +Each component plays a specific role in managing the API lifecycle, from initial deployment and configuration to real-time data streaming and developer access. Together, they create a cohesive API management ecosystem that can handle the unique challenges of production environments. + +You can learn more about the components that make up Tyk, [here](/tyk-components). + +## Why Use Tyk? + +In summary, Tyk offers a complete API management solution designed for modern, production-grade API operations. With its open-source core, robust security options, high performance, and flexible deployment models, Tyk provides everything an organization needs to manage, scale, and secure their APIs. + +Whether you’re a startup looking to build a simple API or a global enterprise deploying complex, multi-region architectures, Tyk has the tools to support your growth at every stage. If you face problems with scaling your solutions, learn more about how Tyk can support you by [getting started with Tyk Cloud](/getting-started/create-account). 
+ diff --git a/tyk-portal-api.mdx b/tyk-portal-api.mdx new file mode 100644 index 000000000..137a601d6 --- /dev/null +++ b/tyk-portal-api.mdx @@ -0,0 +1,20 @@ +--- +title: "Classic Portal API" +description: "Landing page for the Tyk Classic Portal API documentation" +keywords: "Tyk Classic Portal API, Classic Portal API" +noindex: True +sidebarTitle: "Overview" +--- + +import LegacyClassicPortalApi from '/snippets/legacy-classic-portal-api.mdx'; + + + + +This section describes the Tyk Classic Portal API endpoints. It includes the following: + +* [Portal Keys](/tyk-apis/tyk-portal-api/portal-keys) +* [Portal Policies](/tyk-apis/tyk-dashboard-api/portal-policies) +* [Portal Developers](/tyk-apis/tyk-portal-api/portal-developers) +* [Portal Configuration](/tyk-apis/tyk-portal-api/portal-configuration) +* [Portal Documentation](/tyk-apis/tyk-portal-api/portal-documentation) \ No newline at end of file diff --git a/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables.mdx b/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables.mdx new file mode 100644 index 000000000..d447646b1 --- /dev/null +++ b/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables.mdx @@ -0,0 +1,16 @@ +--- +title: "Tyk Pump Environment Variables" +description: "Using Environment Variables to configure your Tyk Pump" +keywords: "Tyk Pump, Envoronment Variables, Configuration" +order: 6 +sidebarTitle: "Pump" +--- + +import PumpConfig from '/snippets/pump-config.mdx'; + +You can use environment variables to override the config file for the Tyk Pump. Environment variables are created from the dot notation versions of the JSON objects contained with the config files. +To understand how the environment variables notation works, see [Environment Variables](/tyk-oss-gateway/configuration). + +All the Pump environment variables have the prefix `TYK_PMP_`. The environment variables will take precedence over the values in the configuration file. 
+ + diff --git a/tyk-self-managed.mdx b/tyk-self-managed.mdx new file mode 100644 index 000000000..378024531 --- /dev/null +++ b/tyk-self-managed.mdx @@ -0,0 +1,26 @@ +--- +title: "Tyk Self Managed" +description: "This page serves as a comprehensive guide to migrating workloads to Tyk Self Managed" +keywords: "installation, migration, self managed" +sidebarTitle: "Overview" +--- + +## What is Tyk Self-Managed + +Tyk Self-Managed allows you to easily install our Full Lifecycle API Management solution in your own infrastructure. There is no calling home, and there are no usage limits. You have full control. + +## What Does Tyk Self-Managed Include? +The full Tyk Self-Managed system consists of: + +* [Tyk Gateway](/tyk-oss-gateway): Tyk Gateway is provided β€˜Batteries-included’, with no feature lockout. It is an open source enterprise API Gateway, supporting REST, GraphQL, TCP and gRPC protocols, that protects, secures and processes your APIs. +* [Tyk Dashboard](/api-management/dashboard-configuration): The management Dashboard and integration API manage a cluster of Tyk Gateways and also show analytics and features of the [Developer portal](/portal/overview/intro). The Dashboard also provides the API Developer Portal, a customizable developer portal for your API documentation, developer auto-enrollment and usage tracking. +* [Developer Portal](/portal/overview/intro): A customizable API portal to securely publish and manage API access for your consumers. +* [Tyk Pump](/api-management/tyk-pump): Tyk Pump handles moving analytics data between your gateways and your Dashboard (amongst other data sinks). The Tyk Pump is an open source analytics purger that moves the data generated by your Tyk nodes to any back-end. +* [Tyk Identity Broker](/api-management/external-service-integration#what-is-tyk-identity-broker-tib) (Optional): Tyk Identify Broker handles integrations with third-party IDP's. 
It (TIB) is a component providing a bridge between various Identity Management Systems such as LDAP, Social OAuth (e.g. GPlus, Twitter, GitHub) or Basic Authentication providers, to your Tyk installation. +* [Tyk Multi-Data Center Bridge](/api-management/mdcb) (Optional, add-on): Tyk Multi-Data Center Bridge allows for the configuration of a Tyk ecosystem that spans many data centers and clouds. It also (MDCB) acts as a broker between Tyk Gateway Instances that are isolated from one another and typically have their own Redis DB. + +Tyk Self-Managed Archtecture + +## Getting Started + +To get started with Tyk Self-Managed, you can follow the [Getting Start Guide](/getting-started/quick-start) which provides a step-by-step walkthrough of setting up Tyk in your environment. diff --git a/tyk-self-managed/install.mdx b/tyk-self-managed/install.mdx new file mode 100644 index 000000000..722a27451 --- /dev/null +++ b/tyk-self-managed/install.mdx @@ -0,0 +1,3206 @@ +--- +title: "Installation Options for Tyk Self-Managed" +description: "Explore the various installation options for Tyk Self-Managed, including Docker, Kubernetes, Ansible, and more." +sidebarTitle: "Installation Options" +--- + +import { ResponsiveGrid } from '/snippets/ResponsiveGrid.mdx'; + +## Introduction + + + + + +**Read time: 10 mins** + +Install with Docker + + + +**Read time: 10 mins** + +Install on K8s + + + +**Read time: 10 mins** + +Install with Ansible + + + +**Read time: 10 mins** + +Install on Red Hat + + + +**Read time: 10 mins** + +Install on Ubuntu + + + +**Read time: 10 mins** + +Install on Amazon AWS + + + +**Read time: 10 mins** + +Install Tyk on Heroku + + + +**Read time: 10 mins** + +Install on Microsoft Azure + + + + + + +## Tyk Dependencies and Database Support + +### MongoDB / PostgreSQL + +Tyk Dashboard requires a persistent datastore for its operations. By default MongoDB is used. From Tyk v4.0, we also support PostgreSQL. 
See [Database Options](/api-management/dashboard-configuration#supported-database) for a list of versions and drop-in replacements we support. + +### Redis + +Tyk Gateway requires Redis for its operations. Here is the list of supported versions: + +**Supported Versions** +- Tyk 5.3 supports Redis 6.2.x, 7.0.x, and 7.2.x +- Tyk 5.2.x and earlier supports Redis 6.0.x and Redis 6.2.x only. + +Visit the [Gateway page](/tyk-oss-gateway) for more info. + +### Tyk Gateway Architecture + +The Tyk Gateway can run completely independently, requiring only a Redis database, and can scale horizontally: + +Open Source Architecture + + +### Init Systems + +Tyk packages support SysVinit Linux init systems, [systemd](https://www.freedesktop.org/wiki/Software/systemd/) and Upstart (both 0.6.x and 1.x, [FYI - Ubuntu stopped supporting Upstart] upstart(https://askubuntu.com/questions/1024120/will-ubuntu-18-04-lts-still-support-upstart-or-do-we-have-to-change-to-systemd)). +During package installation only one is chosen depending on the operating system support, e.g.: + +* CentOS 6, RHEL 6, Amazon Linux ship with Upstart 0.6.x +* Ubuntu 14.04, Debian Jessie with Upstart 1.x +* CentOS 7, RHEL 7, Ubuntu 16.04, Debian Stretch are running with systemd +* Certain older distros may only provide SysVinit but all of them typically provide compatibility with its scripts + +Note that any init scripts of your choosing can be used instead of automatically detected ones by copying them from the `install/inits` directory inside the package directory. + +This init system variance implies there are different ways to manage the services and collect service logs. + +#### Upstart +For Upstart, service management can be performed through the `initctl` or a set of `start`, `stop`, `restart` and `status` commands. Upstart 1.x also works with the `service` command. + +#### systemd +For systemd, either `systemctl` or `service` commands may be utilized. 
+ +The `service` command can usually be used with SysVinit scripts, as well as invoking them directly. + + + +* Upstart 0.6.x and SysVinit: log files are located in `/var/logs` for every respective service, e.g. `/var/logs/tyk-gateway.stderr` and `/var/logs/tyk-gateway.stdout` +* Upstart 1.x: by default everything is stored in `/var/logs/upstart` directory, e.g. `/var/logs/upstart/tyk-gateway.log` +* systemd utilizes its own logging mechanism called journald, which is usable via the `journalctl` command, e.g. `journalctl -u tyk-gateway` + + +Please consult with respective init system documentation for more details on how to use and configure it. + + + +## Install on Kubernetes + +The main way to install *Tyk Self-Managed* in a Kubernetes cluster is via Helm charts. +We are actively working to add flexibility and more user flows to our chart. Please reach out +to our teams on support or the cummunity forum if you have questions, requests or suggestions for improvements. + +Get started with one of our quick start guides: + +- [Quick Start with PostgreSQL](#install-tyk-stack-with-helm-chart-postgresql) +- [Quick Start with MongoDB](/tyk-self-managed/install#install-tyk-stack-with-helm-chart-mongodb) + +Or go to [Tyk Stack helm chart](/product-stack/tyk-charts/tyk-stack-chart) for detailed installation instructions and configuration options. + +### Install Tyk Stack with Helm Chart (PostgreSQL) + +The following guides provide instructions to install Redis, PostgreSQL, and Tyk stack with default configurations. It is intended for quick start only. For production, you should install and configure Redis and PostgreSQL separately. 
+ +**Prerequisites** + +* [Kubernetes 1.19+](https://kubernetes.io/docs/setup/) +* [Helm 3+](https://helm.sh/docs/intro/install/) + +**Quick Start** + +The following quick start guide explains how to use the Tyk Stack Helm chart to configure a Dashboard that includes: +- Redis for key storage +- PostgreSQL for app config +- Tyk Pump to send analytics to PostgreSQL. It also opens a metrics endpoint where Prometheus (if available) can scrape from. + +At the end of this quickstart Tyk Dashboard should be accessible through service `dashboard-svc-tyk-tyk-dashboard` at port `3000`. You can login to Dashboard using the admin email and password to start managing APIs. Tyk Gateway will be accessible through service `gateway-svc-tyk-tyk-gateway.tyk.svc` at port `8080`. + +**1. Setup required credentials** + +First, you need to provide Tyk license, admin email and password, and API keys. We recommend to store them in secrets. + +```bash +NAMESPACE=tyk +REDIS_BITNAMI_CHART_VERSION=19.0.2 +POSTGRES_BITNAMI_CHART_VERSION=12.12.10 + +API_SECRET=changeit +ADMIN_KEY=changeit +TYK_LICENSE=changeit +ADMIN_EMAIL=admin@default.com +ADMIN_PASSWORD=changeit + +kubectl create namespace $NAMESPACE + +kubectl create secret generic my-secrets -n $NAMESPACE \ + --from-literal=APISecret=$API_SECRET \ + --from-literal=AdminSecret=$ADMIN_KEY \ + --from-literal=DashLicense=$TYK_LICENSE + +kubectl create secret generic admin-secrets -n $NAMESPACE \ + --from-literal=adminUserFirstName=Admin \ + --from-literal=adminUserLastName=User \ + --from-literal=adminUserEmail=$ADMIN_EMAIL \ + --from-literal=adminUserPassword=$ADMIN_PASSWORD +``` + +**2. Install Redis (if you don't already have Redis installed)** + +If you do not already have Redis installed, you may use these charts provided by Bitnami. 
+ +```bash +helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --install --version $REDIS_BITNAMI_CHART_VERSION +``` +Follow the notes from the installation output to get connection details and password. The DNS name of your Redis as set by Bitnami is `tyk-redis-master.tyk.svc:6379` (Tyk needs the name including the port) + +The Bitnami chart also creates a secret `tyk-redis` which stores the connection password in `redis-password`. We will make use of this secret in installation later. + +**3. Install PostgreSQL (if you don't already have PostgreSQL installed)** + +If you do not already have PostgreSQL installed, you may use these charts provided by Bitnami. + +```bash +helm upgrade tyk-postgres oci://registry-1.docker.io/bitnamicharts/postgresql --set "auth.database=tyk_analytics" -n $NAMESPACE --install --version $POSTGRES_BITNAMI_CHART_VERSION +``` + +Follow the notes from the installation output to get connection details. + +We require the PostgreSQL connection string for Tyk installation. This can be stored in a secret and will be used in installation later. + +```bash +POSTGRESQLURL=host=tyk-postgres-postgresql.$NAMESPACE.svc\ port=5432\ user=postgres\ password=$(kubectl get secret --namespace $NAMESPACE tyk-postgres-postgresql -o jsonpath="{.data.postgres-password}" | base64 -d)\ database=tyk_analytics\ sslmode=disable + +kubectl create secret generic postgres-secrets -n $NAMESPACE --from-literal=postgresUrl="$POSTGRESQLURL" +``` + + + + +Ensure that you are installing PostgreSQL versions that are supported by Tyk. Please consult the list of [supported versions](/api-management/dashboard-configuration#supported-database) that are compatible with Tyk. + + + +**4. 
Install Tyk** +```bash +helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ + +helm repo update + +helm upgrade tyk tyk-helm/tyk-stack -n $NAMESPACE \ + --install \ + --set global.adminUser.useSecretName=admin-secrets \ + --set global.secrets.useSecretName=my-secrets \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password \ + --set global.postgres.connectionStringSecret.name=postgres-secrets \ + --set global.postgres.connectionStringSecret.keyName=postgresUrl +``` + +**5. Done!** + +Now Tyk Dashboard should be accessible through service `dashboard-svc-tyk-tyk-dashboard` at port `3000`. You can login to Dashboard using the admin email and password to start managing APIs. Tyk Gateway will be accessible through service `gateway-svc-tyk-tyk-gateway.tyk.svc` at port `8080`. + +You are now ready to [create an API](/api-management/gateway-config-managing-classic#create-an-api). + +For the complete installation guide and configuration options, please see [Tyk Stack Helm Chart](/product-stack/tyk-charts/tyk-stack-chart). + +### Install Tyk Stack with Helm Chart (MongoDB) + +The following guides provide instructions to install Redis, MongoDB, and Tyk stack with default configurations. It is intended for quick start only. For production, you should install and configure Redis and MongoDB separately. + +**Prerequisites** + +* [Kubernetes 1.19+](https://kubernetes.io/docs/setup/) +* [Helm 3+](https://helm.sh/docs/intro/install/) + + + + + If you want to enable Tyk Enterprise Developer Portal, please use [PostgreSQL](#install-tyk-stack-with-helm-chart-postgresql). MongoDB is not supported in Developer Portal. + + + +**Quick Start** + +The following quick start guide explains how to use the Tyk Stack Helm chart to configure a Dashboard that includes: +- Redis for key storage +- MongoDB for app config +- Tyk Pump to send analytics to MongoDB. 
It also opens a metrics endpoint where Prometheus (if available) can scrape from. + +At the end of this quickstart Tyk Dashboard should be accessible through service `dashboard-svc-tyk-tyk-dashboard` at port `3000`. You can login to Dashboard using the admin email and password to start managing APIs. Tyk Gateway will be accessible through service `gateway-svc-tyk-tyk-gateway.tyk.svc` at port `8080`. + +**1. Setup required credentials** + +First, you need to provide Tyk license, admin email and password, and API keys. We recommend to store them in secrets. +```bash +NAMESPACE=tyk +REDIS_BITNAMI_CHART_VERSION=19.0.2 +MONGO_BITNAMI_CHART_VERSION=15.1.3 + +API_SECRET=changeit +ADMIN_KEY=changeit +TYK_LICENSE=changeit +ADMIN_EMAIL=admin@default.com +ADMIN_PASSWORD=changeit + +kubectl create namespace $NAMESPACE + +kubectl create secret generic my-secrets -n $NAMESPACE \ + --from-literal=APISecret=$API_SECRET \ + --from-literal=AdminSecret=$ADMIN_KEY \ + --from-literal=DashLicense=$TYK_LICENSE + +kubectl create secret generic admin-secrets -n $NAMESPACE \ + --from-literal=adminUserFirstName=Admin \ + --from-literal=adminUserLastName=User \ + --from-literal=adminUserEmail=$ADMIN_EMAIL \ + --from-literal=adminUserPassword=$ADMIN_PASSWORD +``` + +**2. Install Redis (if you don't have a Redis instance)** + +If you do not already have Redis installed, you may use these charts provided by Bitnami. + +```bash +helm upgrade tyk-redis oci://registry-1.docker.io/bitnamicharts/redis -n $NAMESPACE --install --version $REDIS_BITNAMI_CHART_VERSION +``` +Follow the notes from the installation output to get connection details and password. The DNS name of your Redis as set by Bitnami is +`tyk-redis-master.tyk.svc:6379` (Tyk needs the name including the port) + +The Bitnami chart also creates a secret `tyk-redis` which stores the connection password in `redis-password`. We will make use of this secret in installation later. 
+ + + +Please make sure you are installing Redis versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/planning-for-production/database-settings#redis). + + + +**3. Install MongoDB (if you don't have a MongoDB instance)** + +If you do not already have MongoDB installed, you may use these charts provided by Bitnami. + +```bash +helm upgrade tyk-mongo oci://registry-1.docker.io/bitnamicharts/mongodb -n $NAMESPACE --install --version $MONGO_BITNAMI_CHART_VERSION +``` + + + +Please make sure you are installing MongoDB versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/api-management/dashboard-configuration#supported-database). + + + + + +Bitnami MongoDB image is not supported on darwin/arm64 architecture. + + + +We require the MongoDB connection string for Tyk installation. You can store it in a secret and provide the secret in installation later. + +```bash +MONGOURL=mongodb://root:$(kubectl get secret --namespace $NAMESPACE tyk-mongo-mongodb -o jsonpath="{.data.mongodb-root-password}" | base64 -d)@tyk-mongo-mongodb.$NAMESPACE.svc:27017/tyk_analytics?authSource=admin + +kubectl create secret generic mongourl-secrets --from-literal=mongoUrl=$MONGOURL -n $NAMESPACE +``` + + + + +Ensure that you are installing MongoDB versions that are supported by Tyk. Please consult the list of [supported versions](/api-management/dashboard-configuration#supported-database) that are compatible with Tyk. + + + +**4. 
Install Tyk** +```bash +helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ + +helm repo update + +helm upgrade tyk tyk-helm/tyk-stack -n $NAMESPACE \ + --install \ + --set global.adminUser.useSecretName=admin-secrets \ + --set global.secrets.useSecretName=my-secrets \ + --set global.redis.addrs="{tyk-redis-master.$NAMESPACE.svc:6379}" \ + --set global.redis.passSecret.name=tyk-redis \ + --set global.redis.passSecret.keyName=redis-password \ + --set global.mongo.driver=mongo-go \ + --set global.mongo.connectionURLSecret.name=mongourl-secrets \ + --set global.mongo.connectionURLSecret.keyName=mongoUrl \ + --set global.storageType=mongo \ + --set tyk-pump.pump.backend='{prometheus,mongo}' +``` + +**5. Done!** + +Now Tyk Dashboard should be accessible through service `dashboard-svc-tyk-tyk-dashboard` at port `3000`. You can login to Dashboard using the admin email and password to start managing APIs. Tyk Gateway will be accessible through service `gateway-svc-tyk-tyk-gateway.tyk.svc` at port `8080`. + +You are now ready to [create an API](/api-management/gateway-config-managing-classic#create-an-api). + +For the complete installation guide and configuration options, please see [Tyk Stack Helm Chart](/product-stack/tyk-charts/tyk-stack-chart). + + +### Install Tyk Stack on Windows with Helm + + + + +Installing Tyk on Kubernetes requires a multi-node Tyk license. If you are evaluating Tyk on Kubernetes, [contact us](https://tyk.io/about/contact/) to obtain a temporary license. + + + + + +This deployment is NOT designed for production use or performance testing. The Tyk Pro Docker Demo is our full, [Self-Managed](/tyk-self-managed/install) solution, which includes our Gateway, Dashboard and analytics processing pipeline. 
+ +This demo will run Tyk Self-Managed on your machine, which contains 5 containers: Tyk Gateway, Tyk Dashboard, Tyk Pump, Redis and either MongoDB or one of our supported [SQL databases](/api-management/dashboard-configuration#supported-database). + +This demo is great for proof of concept and demo purposes, but if you want to test performance, you need to move each component to a separate machine. + + + + + +You use this at your own risk. Tyk is not supported on the Windows platform. However you can test it as a proof of concept using our Pro Demo Docker installation. + + + +**Prerequisites** + +- MS Windows 10 Pro +- [Tyk Helm Chart](https://github.com/TykTechnologies/tyk-helm-chart) +- [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/install/) running with a signed in [Docker ID](https://docs.docker.com/docker-id/) +- [minikube](https://minikube.sigs.k8s.io/docs/start/) +- [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +- [Helm](https://github.com/helm/helm/releases) +- Git for Windows +- [Python for Windows](https://www.python.org/downloads/windows/) +- PowerShell running as administrator +- Our Pro Demo Docker [GitHub repo](https://github.com/TykTechnologies/tyk-pro-docker-demo) +- A free Tyk Self-Managed [Developer license](https://tyk.io/sign-up) + +Ensure that kubectl and helm prerequisites are configured on your Windows path environment variable + +This demo installation was tested with the following tools/versions: + +* Microsoft Windows 10 Pro v1909 VM on Azure (Standard D2 v3 size) +* Docker Desktop for Windows 2.2.0.0 (Docker engine v19.03.5) +* helm v3.0.3 +* minikube v1.7.1 (k8s v 1.17.2) +* kubectl v 1.17.0 (Note that kubectl is packaged with Docker Desktop for Windows, but the version may be incompatible with k8s) + +**Installation** + +Now you have your prerequisites, follow the instructions from our [Tyk Helm Chart](#use-legacy-helm-chart) page. 
+ +### Use Legacy Helm Chart + + + +`tyk-pro` chart is deprecated. Please use our [Tyk Stack helm chart](/product-stack/tyk-charts/tyk-stack-chart) instead. + +We recommend all users migrate to the `tyk-stack` Chart. Please review the [Configuration](/product-stack/tyk-charts/tyk-stack-chart) section of the new helm chart and cross-check with your existing configurations while planning for migration. + + + +**Introduction** + +Tyk Helm chart is the preferred (and easiest) way to install **Tyk Self-Managed** on Kubernetes. +The helm chart `tyk-helm/tyk-pro` will install the full Tyk platform with **Tyk Manager**, **Tyk Gateways** and **Tyk Pump** into your Kubernetes cluster. You can also choose to enable the installation of **Tyk Operator** (to manage your APIs in a declarative way). + +**Prerequisites** + +1. **Tyk License** + + If you are evaluating Tyk on Kubernetes, [contact us](https://tyk.io/about/contact/) to obtain a temporary license. + +2. **Data stores** + + The following are required for a Tyk Self-Managed installation: + - Redis - Should be installed in the cluster or reachable from inside the cluster (for SaaS option). + You can find instructions for a simple Redis installation below. + - MongoDB or SQL - Should be installed in the cluster or be reachable by the **Tyk Manager** (for SaaS option). + + You can find supported MongoDB and SQL versions [here](/planning-for-production/database-settings). + + Installation instructions for Redis and MongoDB/SQL are detailed below. + +3. **Helm** + + Installed [Helm 3](https://helm.sh/) + Tyk Helm Chart uses Helm v3 (i.e. not Helm v2). + +**Installation** + +As well as our official Helm repo, you can also find it in [ArtifactHub](https://artifacthub.io/packages/helm/tyk-helm/tyk-pro). 
+[Open in ArtifactHub](https://artifacthub.io/packages/helm/tyk-helm/tyk-pro) + +If you are interested in contributing to our charts, suggesting changes, creating PRs or any other way, +please use [GitHub Tyk-helm-chart repo](https://github.com/TykTechnologies/tyk-helm-chart/tree/master/tyk-pro) +or contact us in [Tyk Community forum](https://community.tyk.io/) or through our sales team. + + +1. **Add Tyk official Helm repo to your local Helm repository** + + ```bash + helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ + helm repo update + ``` + +2. **Create namespace for your Tyk deployment** + + ```bash + kubectl create namespace tyk + ``` + +3. **Getting the values.yaml of the chart** + + Before we proceed with installation of the chart, you need to set some custom values. + To see what options are configurable on the chart and save those options to a custom values.yaml file, run: + + ```bash + helm show values tyk-helm/tyk-pro > values.yaml + ``` + +**Installing the data stores** + +For Redis, MongoDB or SQL you can use these rather excellent charts provided by Bitnami. + + + +
+ +**Redis** +```bash +helm install tyk-redis bitnami/redis -n tyk --version 19.0.2 +``` + + + +Please make sure you are installing Redis versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/planning-for-production/database-settings#redis). + + + +Follow the notes from the installation output to get connection details and password. + +```console + Redis(TM) can be accessed on the following DNS names from within your cluster: + + tyk-redis-master.tyk.svc.cluster.local for read/write operations (port 6379) + tyk-redis-replicas.tyk.svc.cluster.local for read-only operations (port 6379) + + export REDIS_PASSWORD=$(kubectl get secret --namespace tyk tyk-redis -o jsonpath="{.data.redis-password}" | base64 --decode) +``` + +The DNS name of your Redis as set by Bitnami is `tyk-redis-master.tyk.svc.cluster.local:6379` (Tyk needs the name including the port) +You can update them in your local `values.yaml` file under `redis.addrs` and `redis.pass` +Alternatively, you can use `--set` flag to set it in Tyk installation. For example `--set redis.pass=$REDIS_PASSWORD` +
+ +
+ +**MongoDB** +```bash +helm install tyk-mongo bitnami/mongodb --set "replicaSet.enabled=true" -n tyk --version 15.1.3 +``` + + +Bitnami MongoDB image is not supported on darwin/arm64 architecture. + + + +Follow the notes from the installation output to get connection details and password. The DNS name of your MongoDB as set with Bitnami is `tyk-mongo-mongodb.tyk.svc.cluster.local` and you also need to set the `authSource` parameter to `admin`. The full `mongoURL` should be similar to `mongoURL: mongodb://root:pass@tyk-mongo-mongodb.tyk.svc.cluster.local:27017/tyk_analytics?authSource=admin`. You can update them in your local `values.yaml` file under `mongo.mongoURL`. Alternatively, you can use `--set` flag to set it in your Tyk installation. + + + +**Important Note regarding MongoDB** + +This Helm chart enables the *PodDisruptionBudget* for MongoDB with an arbiter replica-count of 1. If you intend to perform +system maintenance on the node where the MongoDB pod is running and this maintenance requires the node to be drained, +this action will be prevented due to the replica count being 1. Increase the replica count in the helm chart deployment to +a minimum of 2 to remedy this issue. + + + +
+ +
+ +**Postgres** +```bash +helm install tyk-postgres bitnami/postgresql --set "auth.database=tyk_analytics" -n tyk --version 12.12.10 +``` + + + +Please make sure you are installing PostgreSQL versions that are supported by Tyk. Please refer to Tyk docs to get list of [supported versions](/api-management/dashboard-configuration#supported-database). + + + +Follow the notes from the installation output to get connection details and password. The DNS name of your Postgres service as set by Bitnami is `tyk-postgres-postgresql.tyk.svc.cluster.local`. +You can update connection details in `values.yaml` file under `postgres`. +
+
+ +--- + +**Quick Redis and MongoDB PoC installation** + + +Another option for Redis and MongoDB, to get started quickly, is to use our **simple-redis** and **simple-mongodb** charts. +Please note that these provided charts must not ever be used in production and for anything +but a quick start evaluation only. Use external redis or Official Redis Helm chart in any other case. +We provide this chart, so you can quickly get up and running, however it is not meant for long term storage of data for example. + +```bash +helm install redis tyk-helm/simple-redis -n tyk +helm install mongo tyk-helm/simple-mongodb -n tyk +``` + + + +**License setting** + +For the **Tyk Self-Managed** chart we need to set the license key in your custom `values.yaml` file under `dash.license` field +or use `--set dash.license={YOUR-LICENSE_KEY}` with the `helm install` command. + + +Tyk Self-Managed licensing allow for different numbers of Gateway nodes to connect to a single Dashboard instance. +To ensure that your Gateway pods will not scale beyond your license allowance, please ensure that the Gateway's resource kind is `Deployment` +and the replica count to your license node limit. By default, the chart is configured to work with a single node license: `gateway.kind=Deployment` and `gateway.replicaCount=1`. + + + +**Please Note** + +There may be intermittent issues on the new pods during the rolling update process, when the total number of online +gateway pods is more than the license limit with lower amounts of Licensed nodes. + + + +**Installing Tyk Self managed** + +Now we can install the chart using our custom values: + +```bash +helm install tyk-pro tyk-helm/tyk-pro -f ./values.yaml -n tyk --wait +``` + + + +**Important Note regarding MongoDB** + +The `--wait` argument is important to successfully complete the bootstrap of your **Tyk Manager**. + + + +**Pump Installation** + +By default pump installation is disabled. 
You can enable it by setting `pump.enabled` to `true` in `values.yaml` file. +Alternatively, you can use `--set pump.enabled=true` while doing helm install. + +**Quick Pump configuration (Supported from tyk helm v0.10.0)** + +*1. Mongo Pump* + +To configure mongo pump, make the following changes in `values.yaml` file: +1. Set `backend` to `mongo`. +2. Set connection string in `mongo.mongoURL`. + +*2. Postgres Pump* + +To configure postgres pump, make the following changes in `values.yaml` file: +1. Set `backend` to `postgres`. +2. Set connection string parameters in `postgres` section. + +**Tyk Developer Portal** + +You can disable the bootstrapping of the Developer Portal by setting `portal.bootstrap: false` in your local `values.yaml` file. + +**Using TLS** + +You can turn on the TLS option under the gateway section in your local `values.yaml` file which will make your Gateway +listen on port 443 and load up a dummy certificate. You can set your own default certificate by replacing the file in the `certs/` folder. + +**Mounting Files** + +To mount files to any of the Tyk stack components, add the following to the mounts array in the section of that component. +For example: + ```bash + - name: aws-mongo-ssl-cert + filename: rds-combined-ca-bundle.pem + mountPath: /etc/certs +``` + +**Sharding APIs** + +Sharding is the ability for you to decide which of your APIs are loaded on which of your Tyk Gateways. This option is +turned off by default, however, you can turn it on by updating the `gateway.sharding.enabled` option. Once you do that you +will also need to set the `gateway.sharding.tags` field with the tags that you want that particular Gateway to load. (ex. tags: "external,ingress".) +You can then add those tags to your APIs in the API Designer, under the **Advanced Options** tab, and +the **Segment Tags (Node Segmentation)** section in your Tyk Dashboard. +Check [Tyk Gateway Sharding](/api-management/multiple-environments#what-is-api-sharding-) for more details. 
+ + +### Install More Tyk Components + +**Installing Tyk Enterprise Developer Portal** + +If you are deploying the **Tyk Enterprise Developer Portal**, set the appropriate values under the `enterprisePortal` section in your `values.yaml`. Please visit [Tyk Enterprise Developer Portal installation](/portal/install#using-legacy-helm-chart) for a step by step guide. + +>**Note**: Helm chart supports Enterprise Portal v1.2.0+ + +**Installing Tyk Self-managed Control Plane** + +If you are deploying the **Tyk Control plane**, a.k.a **MDCB**, for a **Tyk Multi Data Center Bridge** deployment, then set +the `mdcb.enabled: true` option in the local `values.yaml` to add the **MDCB** component to your installation. +Check [Tyk Control plane](/api-management/mdcb) for more configuration details. + +This setting enables multi-cluster, multi Data-Center API management from a single dashboard. + + +**Tyk Identity Broker (TIB)** + +The **Tyk Identity Broker** (TIB) is a micro-service portal that provides a bridge between various Identity Management Systems +such as LDAP, OpenID Connect providers and legacy Basic Authentication providers, to your Tyk installation. +See [TIB](/api-management/external-service-integration#installing-tyk-identity-broker-tib) for more details. + +For SSO to **Tyk Manager** and **Tyk developer portal** purposes you do not need to install **TIB**, as its functionality is now +part of the **Tyk Manager**. However, if you want to run it separately (as you used to before this merge) or if you need it + as a broker for the **Tyk Gateway** you can do so. + +Once you have installed your **Tyk Gateway** and **Tyk Manager**, you can configure **TIB** by adding its configuration environment variables +under the `tib.extraEnvs` section and updating the `profile.json` in your `configs` folder. +See our [TIB GitHub repo](https://github.com/TykTechnologies/tyk-identity-broker#how-to-configure-tib). 
+Once you complete your modifications you can run the following command from the root of the repository to update your helm chart. + +```bash +helm upgrade tyk-pro values.yaml -n tyk +``` + +This chart implies there's a **ConfigMap** with a `profiles.json` definition in it. Please use `tib.configMap.profiles` value +to set the name of this **ConfigMap** (`tyk-tib-profiles-conf` by default). + + + +**Tyk Operator and Ingress** + +For a GitOps workflow used with a **Tyk Self-Managed** installation or setting the Tyk Gateway as a Kubernetes ingress controller, Tyk Operator enables you to manage API definitions, security policies and other Tyk features using Kubernetes manifest files. +To get started go to [Tyk Operator](/api-management/automations/operator). + +## Install on AWS Marketplace + + +Tyk offers a flexible and powerful API management solution through **Tyk Cloud** on the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-pboluroscnqro). Tyk Cloud is an end-to-end managed API platform where both the control plane and gateways are installed on AWS for a seamless, fully cloud-hosted experience. + +For those who need more deployment flexibility, Tyk Cloud also supports a [Hybrid Gateway](/tyk-cloud/environments-deployments/hybrid-gateways) option. In this setup, the control plane remains hosted and managed by Tyk on AWS, while the gateways can be deployed on your preferred cloud provider or on-premises environmentβ€”allowing you to meet data locality and compliance needs without sacrificing control. + +**Available AWS Deployment Regions** + +You can deploy Tyk Cloud in the following AWS regions: + +- **Singapore**: `aws-ap-southeast-1` +- **Frankfurt, Germany**: `aws-eu-central-1` +- **London, UK**: `aws-eu-west-2` +- **N. Virginia, USA**: `aws-us-east-1` +- **Oregon, USA**: `aws-us-west-2` +- **Australia**: `aws-ap-southeast-2` + +Getting started with Tyk Cloud via the AWS Marketplace is quick and easy. 
Sign up today to access Tyk’s comprehensive API management tools designed to scale with your needs. + +**Install Tyk on AWS EC2** + + +1. Spin up an [EC2 instance](https://aws.amazon.com/ec2/instance-types/), AWS Linux2 preferably, T2.Medium is fine + - add a public IP + - open up SG access to: + - 3000 for the Tyk Dashboard + - 8080 for the Tyk Gateway + - 22 TCP for SSH + +2. SSH into the instance +`ssh -i mykey.pem ec2-user@public-ec2-ip` + +3. Install Git, Docker, & Docker Compose +Feel free to copy paste these +```.sh +sudo yum update -y +sudo yum install git -y +sudo yum install -y docker +sudo service docker start +sudo usermod -aG docker ec2-user +sudo su +sudo curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose +sudo chmod +x /usr/local/bin/docker-compose +sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose +docker ps +``` + +4. Clone the Tyk Pro Docker repo + +```.bash +git clone https://github.com/TykTechnologies/tyk-pro-docker-demo +cd tyk-pro-docker-demo/ +``` + +5. Add the license key to `confs/tyk_analytics.conf` into the `license_key variable` using "vi" or "nano", etc + +**This is the most common place to have problems.** + +**Look for extra spaces between quotes ("") and the license key. It will not work if there are any.** + +Inside `tyk_analytics.conf`, `license_key` should look something like this, with a real license however: + +` +"license_key": "eyJhbGciOiJSUzI1NiIsInR5cCI...WQ", +` + +6. Run the containers via `docker-compose` + +```.bash +docker-compose up -d +``` + +7. Visit + +``` +http://:3000 +``` +and fill out the Bootstrap form! +**If you see any page besides the Bootstrap page, you have pasted the license key incorrectly** + +**Enable SSL for the Gateway & Dashboard** + +1. 
Add the following to `confs/tyk.conf` + +```.json +"policies.policy_connection_string": "https://tyk-dashboard:3000" +"db_app_conf_options.connection_string": "https://tyk-dashboard:3000" +"http_server_options": { + "use_ssl": true, + "certificates": [ + { + "domain_name": "*.yoursite.com", + "cert_file": "./new.cert.cert", + "key_file": "./new.cert.key" + } + ], + "ssl_insecure_skip_verify": true ## YOU ONLY NEED THIS IF YOU ARE USING SELF SIGNED CERTS +} +``` + +2. Add the following to `confs/tyk_analytics.conf` + +```.json +"tyk_api_config.Host": "https://tyk-gateway" +"http_server_options": { + "use_ssl": true, + "certificates": [ + { + "domain_name": "*.yoursite.com", + "cert_file": "./new.cert.cert", + "key_file": "./new.cert.key" + } + ] +} +``` + +3. Generate self-signed Certs: (Or bring your own CA signed) + +``` +openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes +``` + +4. Mount your certs to containers through `docker-compose.yml` + +```.yaml +tyk-dashboard: + ... + volumes: + - ./cert.pem:/opt/tyk-dashboard/new.cert.cert + - ./key.pem:/opt/tyk-dashboard/new.cert.key +tyk-gateway: + ... + volumes: + - ./cert.pem:/opt/tyk-gateway/new.cert.cert + - ./key.pem:/opt/tyk-gateway/new.cert.key +``` + +5. Restart your containers with the mounted files + +``` +docker-compose up -d tyk-dashboard tyk-gateway +``` + +6. Download the bootstrap script onto EC2 machine + +``` +wget https://raw.githubusercontent.com/sedkis/tyk/master/scripts/bootstrap-ssl.sh +``` + +7. Apply execute permissions to file: + +```chmod +x bootstrap.sh``` + +8. Run the bootstrap script + +```./bootstrap.sh localhost``` + +9. Done! use the generated user and password to log into The Tyk Dashboard + + +## Install with Ansible + + + +**Requirements** + +[Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) is required to run the following commands. + + + +**Getting Started** + +1. 
Clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository + + ```bash + $ git clone https://github.com/TykTechnologies/tyk-ansible + ``` + +2. `cd` into the directory + + ```.bash + $ cd tyk-ansible + ``` + +3. Run the initialisation script to initialise the environment + + ```bash + $ sh scripts/init.sh + ``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install the following: + + - Redis + - MongoDB or PostgreSQL + - Tyk Dashboard + - Tyk Gateway + - Tyk Pump + + ```bash + $ ansible-playbook playbook.yaml -t tyk-pro -t redis -t `mongodb` or `pgsql` + ``` + + You can choose to not install Redis, MongoDB or PostgreSQL by removing the `-t redis` or `-t mongodb` or `-t pgsql`. However Redis and MongoDB or PostgreSQL are a requirement and need to be installed for the Tyk Pro installation to run. + + + + + For a production environment, we recommend that the Gateway, Dashboard and Pump are installed on separate machines. If installing multiple Gateways, you should install each on a separate machine. See [Planning for Production](/planning-for-production) for more details. 
+ + + +**Supported Distributions** + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Amazon Linux | 2 | βœ… | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| Debian | 10 | βœ… | +| Debian | 9 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | +| Ubuntu | 21 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +**Variables** + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| redis.host | | Redis server host if different than the hosts url | +| redis.port | `6379` | Redis server listening port | +| redis.pass | | Redis server password | +| redis.enableCluster | `false` | Enable if redis is running in cluster mode | +| redis.storage.database | `0` | Redis server database | +| redis.tls | `false` | Enable if redis connection is secured with SSL | +| mongo.host | | MongoDB server host if different than the hosts url | +| mongo.port | `27017` | MongoDB server listening port | +| mongo.tls | `false` | Enable if mongo connection is secured with SSL | +| pgsql.host | | PGSQL server host if different than the hosts url | +| pgsql.port | `5432` | PGSQL server listening port | +| pgsql.tls | `false` | Enable if pgsql connection is secured with SSL | +| dash.license | | Dashboard license| +| dash.service.host | | Dashboard server host if different than the hosts url | +| dash.service.port | `3000` | Dashboard server listening port | +| dash.service.proto | `http` | Dashboard server protocol | +| dash.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.service.host | | Gateway server host if different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | 
+| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | +| gateway.rpc.connString | | Use this setting to add the URL for your MDCB or load balancer host | +| gateway.rpc.useSSL | `true` | Set this option to `true` to use an SSL RPC connection| +| gateway.rpc.sslInsecureSkipVerify | `true` | Set this option to `true` to allow the certificate validation (certificate chain and hostname) to be skipped. This can be useful if you use a self-signed certificate | +| gateway.rpc.rpcKey | | Your organization ID to connect to the MDCB installation | +| gateway.rpc.apiKey | | This the API key of a user used to authenticate and authorize the Gateway’s access through MDCB. The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised. The suggested security settings are read for Real-time notifications and the remaining options set to deny | +| gateway.rpc.groupId | | This is the `zone` that this instance inhabits, e.g. the cluster/data-center the Gateway lives in. The group ID must be the same across all the Gateways of a data-center/cluster which are also sharing the same Redis instance. This ID should also be unique per cluster (otherwise another Gateway cluster can pick up your keyspace events and your cluster will get zero updates). | + +- `vars/redis.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| redis_bind_interface | `0.0.0.0` | Binding address of Redis | + +Read more about Redis configuration [here](https://github.com/geerlingguy/ansible-role-redis). 
+ +- `vars/mongodb.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| bind_ip | `0.0.0.0` | Binding address of MongoDB | +| mongodb_version | `4.4` | MongoDB version | + +Read more about MongoDB configuration [here](https://github.com/ansible-collections/community.mongodb). + +- `vars/pgsql.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| postgresql_databases[] | `[]` | Array of DBs to be created | +| postgresql_databases[].name | `tyk_analytics` | Database name | +| postgresql_users[] | `[]` | Array of users to be created | +| postgresql_users[`0`].name | `default` | User name | +| postgresql_users[`0`].password | `topsecretpassword` | User password | +| postgresql_global_config_options[] | `[]` | Postgres service config options | +| postgresql_global_config_options[`1`].option | `listen_addresses` | Listen address binding for the service | +| postgresql_global_config_options[`1`].value | `*` | Default value to listen to all addresses | +| postgresql_hba_entries[] | `[]` | Host based authentication list| +| postgresql_hba_entries[`4`].type | `host` | Entry type | +| postgresql_hba_entries[`4`].database | `tyk_analytics` | Which database this entry will give access to | +| postgresql_hba_entries[`4`].user | `default` | Which users gain access from this entry | +| postgresql_hba_entries[`4`].address | `0.0.0.0/0` | Which addresses gain access from this entry | +| postgresql_hba_entries[`4`].auth_method | `md5` | What authentication method to use for the users | + +Read more about PostgreSQL configuration [here](https://github.com/geerlingguy/ansible-role-postgresql). + + +## Install using Bootstrap CLI + +To list the available flags, execute `tyk-analytics bootstrap -h`: + +``` + usage: tyk-analytics bootstrap [] + + Bootstrap the Dashboard. + + Flags: + -h, --help Show context-sensitive help (also try --help-long and --help-man). + --version Show application version. 
+ --conf="tyk_analytics.conf" + Load a named configuration file. + --create-org Create a new organisation. + --reuse-org=REUSE-ORG Reuse the organisation with given ID. + --drop-org=DROP-ORG Drop the organisation with given ID. +``` + + +**Description** + +The `bootstrap` command makes bootstrapping easier. It helps you to create organizations and users. The command needs a + config file path. By default, it looks at `tyk_analytics.conf` in the directory where the `tyk-analytics` binary is located. + For example: + + ```tyk-analytics bootstrap``` + + You can also give the path of a custom config file with the `--conf` flag. For example: + + ```tyk-analytics bootstrap --conf some-directory/custom.conf``` + + The tool can work in both auto and interactive modes. You can use the flags while running the command or you can just run + it without flags and use interactive mode. + + +**Environment Variables** + +You can override the config values by environment variables. See [how to configure an environment variable](/tyk-oss-gateway/configuration). + +For example, you can override hostname, port, mongo url, redis host and redis port values by exporting the following variables: + +- **TYK_DB_HOSTCONFIG_HOSTNAME** +- **TYK_DB_LISTENPORT** +- **TYK_DB_MONGOURL** +- **TYK_DB_REDISHOST** +- **TYK_DB_REDISPORT** + + +## Install with Docker + + +Tyk has three containers that are available to set up a Docker installation: + +* [The Tyk Gateway container](https://hub.docker.com/r/tykio/tyk-gateway/) +* [The Tyk Dashboard container](https://hub.docker.com/r/tykio/tyk-dashboard/) +* [The Tyk Pump container](https://hub.docker.com/r/tykio/tyk-pump-docker-pub/) + +All three are required for a full deployment. We recommend that each container is installed on a separate machine for optimum performance. + +From v5.5.0 onwards, these images are based on [distroless](https://github.com/GoogleContainerTools/distroless). 
This means that you will not be able to obtain a shell with `docker run --rm -it tykio/tyk-gateway:v5.5.0 sh`. The image can be inspected with tools like [dive](https://github.com/wagoodman/dive) or [Docker Desktop](https://www.docker.com/products/docker-desktop/). + +We also have a [Docker Tyk Pro Demo](/deployment-and-operations/tyk-self-managed/tyk-demos-and-pocs/overview#docker-compose-setup), which installs our full Self-Managed solution, which includes our Gateway, Dashboard, and analytics processing pipeline. This demo will run Tyk Self-Managed on your machine. + + +## Install on Heroku + +**Install Tyk API Gateway on Heroku** + +A full Tyk Self-Managed installation can be deployed to Heroku dynos and workers using [Heroku Container Registry and Runtime](https://devcenter.heroku.com/articles/) functionality. This guide will utilize [Tyk Docker images](https://hub.docker.com/u/tykio/) with a small amount of customization as well as an external MongoDB service. + + +**Prerequisites** + +1. Docker daemon installed and running locally +2. [Heroku account](https://www.heroku.com/), the free plan is sufficient for a basic PoC but not recommended for production usage +3. [Heroku CLI](https://devcenter.heroku.com/articles/heroku-cli) installed +4. MongoDB service (such as [Atlas](https://www.mongodb.com/cloud/atlas), [mLab](https://elements.heroku.com/addons/mongolab), or your own deployment), this guide is based on MongoDB Atlas but others should work as well +5. [Tyk License](https://tyk.io/pricing/on-premise/) (note that in case of running multiple gateway dynos, license type must match) +6. Checkout the [Tyk quickstart repository](https://github.com/TykTechnologies/tyk-pro-heroku) from GitHub +7. Python 2 or 3 in order to execute the bootstrap script + +**Creating Heroku Apps** + +We will create two Heroku apps, one for the Tyk Gateway (with [Redis add-on](https://devcenter.heroku.com/articles/heroku-redis) attached to it) and another for the Dashboard and Pump. 
+ +Given Heroku CLI is installed and your Heroku account is available, log into it: +```bash +heroku login +``` + +Now create the Gateway app and note down its name: +```bash +heroku create +``` +``` +Creating app... done, β¬’ infinite-plains-14949 +https://infinite-plains-14949.herokuapp.com/ | https://git.heroku.com/infinite-plains-14949.git +``` + + +`--space` flag must be added to the command if the app is being created in a private space, see more details in the section on Heroku private spaces (below). + + + +Provision a Redis add-on (we'll use a `hobby-dev` plan for demonstration purposes but that's not suitable for production), replacing the app name with your own: +```bash +heroku addons:create heroku-redis:hobby-dev -a infinite-plains-14949 +``` +``` +Creating heroku-redis:hobby-dev on β¬’ infinite-plains-14949... free +Your add-on should be available in a few minutes. +! WARNING: Data stored in hobby plans on Heroku Redis are not persisted. +redis-infinite-35445 is being created in the background. The app will restart when complete... +Use heroku addons:info redis-infinite-35445 to check creation progress +Use heroku addons:docs heroku-redis to view documentation +``` + +Once add-on provisioning is done, the info command (replacing the add-on name with your own) will show the following output: +```bash +heroku addons:info redis-infinite-35445 +``` +``` +=== redis-infinite-35445 +Attachments: infinite-plains-14949::REDIS +Installed at: Sun May 18 2018 14:23:21 GMT+0300 (EEST) +Owning app: infinite-plains-14949 +Plan: heroku-redis:hobby-dev +Price: free +State: created +``` + +Time to create the Dashboard app and note down its name as well: +```bash +heroku create +``` +``` +Creating app... 
done, β¬’ evening-beach-40625 +https://evening-beach-40625.herokuapp.com/ | https://git.heroku.com/evening-beach-40625.git +``` + +Since the Dashboard and Pump need access to the same Redis instance as the gateway, we'll need to share the Gateway app's add-on with this new app: +```bash +heroku addons:attach infinite-plains-14949::REDIS -a evening-beach-40625 +``` +``` +Attaching redis-infinite-35445 to β¬’ evening-beach-40625... done +Setting REDIS config vars and restarting β¬’ evening-beach-40625... done, v3 +``` + +To check that both apps have access to the same Redis add-on, we can utilize the `heroku config` command and check for the Redis endpoint: +```bash +heroku config -a infinite-plains-14949 | grep REDIS_URL +heroku config -a evening-beach-40625 | grep REDIS_URL +``` + +Their outputs should match. + +**Deploy the Dashboard** + +It's recommended to start with the Dashboard so in your Heroku quickstart clone run: +```bash +cd analytics +ls dashboard +``` +``` +bootstrap.sh Dockerfile.web entrypoint.sh tyk_analytics.conf +``` + +You will find it contains a `Dockerfile.web` for the web dyno, a config file for the Dashboard, entrypoint script for the Docker container and a bootstrap script for seeding the dashboard instance with sample data. All these files are editable for your purposes but have sane defaults for a PoC. + + + +You can use the `FROM` statement in `Dockerfile.web` to use specific dashboard version and upgrade when needed instead of relying on the `latest` tag. + + + + +The [Dashboard configuration](/tyk-dashboard/configuration) can be changed by either editing the `tyk_analytics.conf` file or injecting them as [environment variables](/tyk-oss-gateway/configuration) via `heroku config`. In this guide we'll use the latter for simplicity of demonstration but there is merit to both methods. 
+ +First let's set the license key: +```bash +heroku config:set TYK_DB_LICENSEKEY="your license key here" -a evening-beach-40625 +``` +``` +Setting TYK_DB_LICENSEKEY and restarting β¬’ evening-beach-40625... done, v4 +TYK_DB_LICENSEKEY: should show your license key here +``` + +Now the MongoDB endpoint (replacing with your actual endpoint): +```bash +heroku config:set TYK_DB_MONGOURL="mongodb://user:pass@mongoprimary.net:27017,mongosecondary.net:27017,mongotertiary.net:27017" -a evening-beach-40625 +``` +``` +Setting TYK_DB_MONGOURL and restarting β¬’ evening-beach-40625... done, v5 +TYK_DB_MONGOURL: mongodb://user:pass@mongoprimary.net:27017,mongosecondary.net:27017,mongotertiary.net:27017 +``` + +And enable SSL for it if your service supports/requires this: +```bash +heroku config:set TYK_DB_MONGOUSESSL="true" -a evening-beach-40625 +``` +``` +Setting TYK_DB_MONGOUSESSL and restarting β¬’ evening-beach-40625... done, v6 +TYK_DB_MONGOUSESSL: true +``` + +Since the Tyk Dashboard needs to access gateways sometimes, we'll need to specify the Gateway endpoint too, which is the Gateway app's URL: +```bash +heroku config:set TYK_DB_TYKAPI_HOST="https://infinite-plains-14949.herokuapp.com" -a evening-beach-40625 +heroku config:set TYK_DB_TYKAPI_PORT="443" -a evening-beach-40625 +``` +``` +Setting TYK_DB_TYKAPI_HOST and restarting β¬’ evening-beach-40625... done, v7 +TYK_DB_TYKAPI_HOST: https://infinite-plains-14949.herokuapp.com +Setting TYK_DB_TYKAPI_PORT and restarting β¬’ evening-beach-40625... done, v8 +TYK_DB_TYKAPI_PORT: 443 +``` + +This is enough for a basic Dashboard setup but we recommend also changing at least node and admin secrets with strong random values, as well as exploring other config options. + +Since the Tyk Pump is also a part of this application (as a worker process), we'll need to configure it too. + +```bash +ls pump +``` +``` +Dockerfile.pump entrypoint.sh pump.conf +``` + +Same principles apply here as well. 
Here we'll need to configure MongoDB endpoints for all the Pumps (this can also be done in the `pump.conf` file): +```bash +heroku config:set PMP_MONGO_MONGOURL="mongodb://user:pass@mongoprimary.net:27017,mongosecondary.net:27017,mongotertiary.net:27017" -a evening-beach-40625 +heroku config:set PMP_MONGO_MONGOUSESSL="true" + +heroku config:set PMP_MONGOAGG_MONGOURL="mongodb://user:pass@mongoprimary.net:27017,mongosecondary.net:27017,mongotertiary.net:27017" -a evening-beach-40625 +heroku config:set PMP_MONGOAGG_MONGOUSESSL="true" +``` + +With the configuration in place it's finally time to deploy our app to Heroku. + +First, make sure CLI is logged in to Heroku containers registry: +```bash +heroku container:login +``` +``` +Login Succeeded +``` + +Provided you're currently in `analytics` directory of the quickstart repo: +```bash +heroku container:push --recursive -a evening-beach-40625 +``` +``` +=== Building web (/tyk-heroku-docker/analytics/dashboard/Dockerfile.web) +Sending build context to Docker daemon 8.192kB +Step 1/5 : FROM tykio/tyk-dashboard:v1.6.1 + ---> fdbc67b43139 +Step 2/5 : COPY tyk_analytics.conf /opt/tyk-dashboard/tyk_analytics.conf + ---> 89be9913798b +Step 3/5 : COPY entrypoint.sh /opt/tyk-dashboard/entrypoint.sh + ---> c256152bff29 +Step 4/5 : ENTRYPOINT ["/bin/sh", "-c"] + ---> Running in bc9fe7a569c0 +Removing intermediate container bc9fe7a569c0 + ---> f40e6b259230 +Step 5/5 : CMD ["/opt/tyk-dashboard/entrypoint.sh"] + ---> Running in 705273810eea +Removing intermediate container 705273810eea + ---> abe9f10e8b21 +Successfully built abe9f10e8b21 +Successfully tagged registry.heroku.com/evening-beach-40625/web:latest +=== Building pump (/tyk-heroku-docker/analytics/pump/Dockerfile.pump) +Sending build context to Docker daemon 5.12kB +Step 1/5 : FROM tykio/tyk-pump-docker-pub:v0.5.2 + ---> 247c6b5795a9 +Step 2/5 : COPY pump.conf /opt/tyk-pump/pump.conf + ---> 1befeab8f092 +Step 3/5 : COPY entrypoint.sh /opt/tyk-pump/entrypoint.sh + ---> 
f8ad0681aa70 +Step 4/5 : ENTRYPOINT ["/bin/sh", "-c"] + ---> Running in 0c30d35b9e2b +Removing intermediate container 0c30d35b9e2b + ---> b17bd6a8ed44 +Step 5/5 : CMD ["/opt/tyk-pump/entrypoint.sh"] + ---> Running in a16acb453b62 +Removing intermediate container a16acb453b62 + ---> 47ac9f221d8d +Successfully built 47ac9f221d8d +Successfully tagged registry.heroku.com/evening-beach-40625/pump:latest +=== Pushing web (/tyk-heroku-docker/analytics/dashboard/Dockerfile.web) +The push refers to repository [registry.heroku.com/evening-beach-40625/web] +c60cf00e6e9b: Pushed +11d074829795: Pushed +8b72aa2b2acc: Pushed +ca2feecf234c: Pushed +803aafd71223: Pushed +43efe85a991c: Pushed +latest: digest: sha256:b857afaa69154597558afb2462896275ab667b729072fac224487f140427fa73 size: 1574 +=== Pushing pump (/tyk-heroku-docker/analytics/pump/Dockerfile.pump) +The push refers to repository [registry.heroku.com/evening-beach-40625/pump] +eeddc94b8282: Pushed +37f3b3ce56ab: Pushed +4b61531ec7dc: Pushed +eca9efd615d9: Pushed +0f700064c5a1: Pushed +43efe85a991c: Mounted from evening-beach-40625/web +latest: digest: sha256:f45acaefa3b47a126dd784a888c89e420814ad3031d3d4d4885e340a59aec31c size: 1573 +``` + +This has built Docker images for both dashboard and pump, as well as pushed them to Heroku registry and automatically deployed to the application. + +Provided everything went well (and if not, inspect the application logs), you should be seeing the Dashboard login page at your app URL (e.g "https://evening-beach-40625.herokuapp.com/"). + +However, it doesn't yet have any accounts. 
In order to populate it, please run the `dashboard/bootstrap.sh` script:
+```bash
+dashboard/bootstrap.sh evening-beach-40625.herokuapp.com
+```
+```
+Creating Organization
+ORGID: 5b016ca530867500050b9e90
+Adding new user
+USER AUTH: a0f7c1e878634a60599dc037489a880f
+NEW ID: 5b016ca6dcd0056d702dc40e
+Setting password
+
+DONE
+====
+Login at https://evening-beach-40625.herokuapp.com/
+User: c7ze82m8k3@default.com
+Pass: test123
+```
+
+It will generate a default organization with random admin username and a specified password. The bootstrap script can be edited to suit your needs as well as just editing the user info in the dashboard.
+
+If this was successful, you should be able to log into your dashboard now.
+
+The last step in this app is to start the Pump worker dyno since by default only the web dyno is enabled:
+```bash
+heroku dyno:scale pump=1 -a evening-beach-40625
+```
+```
+Scaling dynos... done, now running pump at 1:Free
+```
+
+At that point the dyno formation should look like this:
+```bash
+heroku dyno:scale -a evening-beach-40625
+```
+```
+pump=1:Free web=1:Free
+```
+
+**Deploy the Gateway**
+
+The process is very similar for the Tyk Gateway, except it doesn't have a worker process and doesn't need access to MongoDB.
+
+```bash
+cd ../gateway
+ls
+```
+```
+Dockerfile.web entrypoint.sh tyk.conf
+```
+
+All these files serve the same purpose as with the Dashboard and the Pump. [Configuration](/tyk-oss-gateway/configuration) can either be edited in `tyk.conf` or [injected](/tyk-oss-gateway/configuration) with `heroku config`. 
+ +To get things going we'll need to set following options for the Dashboard endpoint (substituting the actual endpoint and the app name, now for the gateway app): +```bash +heroku config:set TYK_GW_DBAPPCONFOPTIONS_CONNECTIONSTRING="https://evening-beach-40625.herokuapp.com" -a infinite-plains-14949 +heroku config:set TYK_GW_POLICIES_POLICYCONNECTIONSTRING="https://evening-beach-40625.herokuapp.com" -a infinite-plains-14949 +``` +``` +Setting TYK_GW_DBAPPCONFOPTIONS_CONNECTIONSTRING and restarting β¬’ infinite-plains-14949... done, v4 +TYK_GW_DBAPPCONFOPTIONS_CONNECTIONSTRING: https://evening-beach-40625.herokuapp.com +Setting TYK_GW_POLICIES_POLICYCONNECTIONSTRING and restarting β¬’ infinite-plains-14949... done, v5 +TYK_GW_POLICIES_POLICYCONNECTIONSTRING: https://evening-beach-40625.herokuapp.com +``` + +Since the Redis configuration will be automatically discovered (it's already injected by Heroku), we're ready to deploy: +```bash +heroku container:push --recursive -a infinite-plains-14949 +``` +``` +=== Building web (/tyk-heroku-docker/gateway/Dockerfile.web) +Sending build context to Docker daemon 6.144kB +Step 1/5 : FROM tykio/tyk-gateway:v2.6.1 + ---> f1201002e0b7 +Step 2/5 : COPY tyk.conf /opt/tyk-gateway/tyk.conf + ---> b118611dc36b +Step 3/5 : COPY entrypoint.sh /opt/tyk-gateway/entrypoint.sh + ---> 68ad364030cd +Step 4/5 : ENTRYPOINT ["/bin/sh", "-c"] + ---> Running in 859f4c15a0d2 +Removing intermediate container 859f4c15a0d2 + ---> 5f8c0d1b378a +Step 5/5 : CMD ["/opt/tyk-gateway/entrypoint.sh"] + ---> Running in 44c5e4c87708 +Removing intermediate container 44c5e4c87708 + ---> 86a9eb509968 +Successfully built 86a9eb509968 +Successfully tagged registry.heroku.com/infinite-plains-14949/web:latest +=== Pushing web (/tyk-heroku-docker/gateway/Dockerfile.web) +The push refers to repository [registry.heroku.com/infinite-plains-14949/web] +b8a4c3e3f93c: Pushed +0b7bae5497cd: Pushed +e8964f363bf4: Pushed +379aae48d347: Pushed +ab2b28b92877: Pushed 
+021ee50b0983: Pushed +43efe85a991c: Mounted from evening-beach-40625/pump +latest: digest: sha256:d67b8f55d729bb56e06fe38e17c2016a36f2edcd4f01760c0e62a13bb3c9ed38 size: 1781 +``` + +Inspect the logs (`heroku logs -a infinite-plains-14949`) to check that deployment was successful, also the node should be registered by the Dashboard in "System Management" -> "Nodes and Licenses" section. + +You're ready to follow the guide on [creating and managing your APIs](/api-management/gateway-config-managing-classic#create-an-api) with this Heroku deployment. + + + +To use the [geographic log distribution](/api-management/dashboard-configuration#activity-by-location) feature in the Dashboard please supply the GeoLite2 DB in the `gateway` directory, uncomment the marked line in `Dockerfile.web` and set the `analytics_config.enable_geo_ip` setting (or `TYK_GW_ANALYTICSCONFIG_ENABLEGEOIP` env var) to `true`. + + + +**Heroku Private Spaces** + +Most instructions are valid for [Heroku Private Spaces runtime](https://devcenter.heroku.com/articles/private-spaces). However there are several differences to keep in mind. + +Heroku app creation commands must include the private space name in the `--space` flag, e.g.: +```bash +heroku create --space test-space-virginia +``` + +When deploying to the app, the container must be released manually after pushing the image to the app: +```bash +heroku container:push --recursive -a analytics-app-name +heroku container:release web -a analytics-app-name +heroku container:release pump -a analytics-app-name +``` + +Similarly, the Gateway: +```bash +heroku container:push --recursive -a gateway-app-name +heroku container:release web -a gateway-app-name +``` + +Please allow several minutes for the first deployment to start as additional infrastructure is being created for it. Next deployments are faster. + +Private spaces maintain stable set of IPs that can be used for allowing fixed set of IPs on your upstream side (e.g. 
on an external database service). Find them using the following command: +```bash +heroku spaces:info --space test-space-virginia +``` + +Alternatively VPC peering can be used with the private spaces if external service supports it. This way exposure to external network can be avoided. For instance, see [MongoDB Atlas guide](https://www.mongodb.com/blog/post/integrating-mongodb-atlas-with-heroku-private-spaces) for setting this up. + +The minimal Heroku Redis add-on plan that installs into your private space is currently `private-7`. Please refer to [Heroku's Redis with private spaces guide](https://devcenter.heroku.com/articles/heroku-redis-and-private-spaces) for more information. + +Apps in private spaces don't enable SSL/TLS by default. It needs to be configured in the app settings along with the domain name for it. If it's not enabled, please make sure that configs that refer to corresponding hosts are using HTTP instead of HTTPS and related ports (80 for HTTP). + +**Gateway Plugins** + +In order to enable [rich plugins](/api-management/plugins/rich-plugins#) for the Gateway, please set the following Heroku config option to either `python` or `lua` depending on the type of plugins used: +```bash +heroku config:set TYK_PLUGINS="python" -a infinite-plains-14949 +``` +``` +Setting TYK_PLUGINS and restarting β¬’ infinite-plains-14949... 
done, v9 +TYK_PLUGINS: python +``` + +After re-starting the Gateway, the logs should be showing something similar to this: +``` +2018-05-18T13:13:50.272511+00:00 app[web.1]: Tyk will be using python plugins +2018-05-18T13:13:50.311510+00:00 app[web.1]: time="May 18 13:13:50" level=info msg="Setting PYTHONPATH to 'coprocess/python:middleware/python:event_handlers:coprocess/python/proto'" +2018-05-18T13:13:50.311544+00:00 app[web.1]: time="May 18 13:13:50" level=info msg="Initializing interpreter, Py_Initialize()" +2018-05-18T13:13:50.497815+00:00 app[web.1]: time="May 18 13:13:50" level=info msg="Initializing dispatcher" +``` + +Set this variable back to an empty value in order to revert back to the default behavior. + +**Upgrading or Customizing Tyk** + +Since this deployment is based on Docker images and containers, upgrading or making changes to the deployment is as easy as building a new image and pushing it to the registry. + +Specifically, upgrading version of any Tyk components is done by editing the corresponding `Dockerfile` and replacing the base image version tag. E.g. changing `FROM tykio/tyk-gateway:v2.5.4` to `FROM tykio/tyk-gateway:v2.6.1` will pull the Tyk gateway 2.6.1. We highly recommend specifying concrete version tags instead of `latest` for better house keeping. + +Once these changes have been made just run `heroku container:push --recursive -a app_name` on the corresponding directory as shown previously in this guide. This will do all the building and pushing as well as gracefully deploying on your Heroku app. + + +Please refer to [Heroku documentation on containers and registry](https://devcenter.heroku.com/articles/container-registry-and-runtime) for more information. + + +## Install on Microsoft Azure + +[Azure](https://azure.microsoft.com/en-us/explore/) is Microsoft's cloud services platform. 
It supports both the running of [Ubuntu Servers](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/canonical.0001-com-ubuntu-server-focal?tab=overview), as well as [Docker](https://learn.microsoft.com/en-us/previous-versions/azure/virtual-machines/linux/docker-machine) and [Docker-Compose](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/docker-compose-quickstart).
+
+For more details, see the [Azure Documentation](https://docs.microsoft.com/en-us/azure/).
+
+**Tyk Installation Options for Azure**
+
+Azure allows you to install Tyk in the following ways:
+
+**On-Premises**
+
+1. Via our [Ubuntu Setup](/tyk-self-managed/install#debian-ubuntu-install-gateway) on an installed Ubuntu Server on Azure.
+2. Via our [Docker Installation](/deployment-and-operations/tyk-self-managed/tyk-demos-and-pocs/overview#docker-compose-setup) using Azure's Docker support.
+
+See our video for installing Tyk on Ubuntu via Azure:
+
+
+
+We also have a [blog post](https://tyk.io/blog/getting-started-with-tyk-on-microsoft-azure-and-ubuntu/) that walks you through installing Tyk on Azure.
+
+
+## Install to Google Cloud
+
+[GCP](https://cloud.google.com/) is Google's Cloud services platform. It supports both the running of [Ubuntu Servers](https://console.cloud.google.com/marketplace/browse?q=ubuntu%2020.04) and [Docker](https://cloud.google.com/build/docs/cloud-builders).
+
+For more details, see the [Google Cloud Documentation](https://cloud.google.com/docs).
+
+**Tyk Installation Options for Google Cloud**
+
+Google Cloud allows you to install Tyk in the following ways:
+
+**On-Premises**
+
+1. Via our [Ubuntu Setup](/tyk-self-managed/install#debian-ubuntu-install-gateway) on an installed Ubuntu Server within Google Cloud.
+2. Via our [Docker Installation](/deployment-and-operations/tyk-self-managed/tyk-demos-and-pocs/overview#docker-compose-setup) using Google Cloud's Docker support. 
+
+**Tyk Pump on GCP**
+
+When running Tyk Pump in GCP using [Cloud Run](https://cloud.google.com/run/docs/overview/what-is-cloud-run) it is available 24/7. However, since it is serverless you also need to ensure that the _CPU always allocated_ option is configured to ensure availability of the analytics. Otherwise, for each request there will be a lag between the Tyk Pump container starting up and having the CPU allocated. Subsequently, the analytics would only be available during this time.
+
+1. Configure Cloud Run to have the [CPU always allocated](https://cloud.google.com/run/docs/configuring/cpu-allocation#setting) option enabled. Otherwise, the Tyk Pump container needs to warm up, which takes approximately 1 min. Subsequently, by this time the stats are removed from Redis.
+
+2. Update the Tyk Gateway [configuration](/tyk-oss-gateway/configuration#analytics_configstorage_expiration_time) to keep the stats for 3 mins to allow Tyk Pump to process them. This value should be greater than the Pump [purge delay](/tyk-pump/tyk-pump-configuration/tyk-pump-environment-variables#purge_delay) to ensure the analytics data exists long enough in Redis to be processed by the Pump.
+
+
+## Install Tyk on Red Hat (RHEL / CentOS)
+
+
+Select the preferred way of installing Tyk by selecting **Shell** or **Ansible** tab for instructions.
+There are 4 components which need to be installed. Each can be installed via shell or ansible.
+
+### Install Database
+
+#### Using Shell
+
+**Supported Distributions**
+| Distribution | Version | Supported |
+| --------- | :---------: | :---------: |
+| CentOS | 7 | βœ… |
+| RHEL | 9 | βœ… |
+| RHEL | 8 | βœ… |
+| RHEL | 7 | βœ… |
+
+
+**Install and Configure Dependencies**
+
+ +**Redis** + +Tyk Gateway has a [dependency](/planning-for-production/database-settings#redis) on Redis. Follow the steps provided by Red Hat to make the installation of Redis, conducting a [search](https://access.redhat.com/search/?q=redis) for the correct version and distribution. + +**Storage Database** + +Tyk Dashboard has a dependency on a storage database that can be [PostgreSQL](/planning-for-production/database-settings#postgresql) or [MongoDB](/planning-for-production/database-settings#mongodb-sizing-guidelines). + + +**Option 1: Install PostgreSQL** + +Check the PostgreSQL supported [versions](/planning-for-production/database-settings#postgresql). Follow the steps provided by [PostgreSQL](https://www.postgresql.org/download/linux/redhat/) to install it. + +Configure PostgreSQL + +Create a new role/user +```console +sudo -u postgres createuser --interactive +``` +The name of the role can be "tyk" and say yes to make it a superuser + +Create a matching DB with the same name. Postgres authentication system assumes by default that for any role used to log in, that role will have a database with the same name which it can access. +```console +sudo -u postgres createdb tyk +``` +Add another user to be used to log into your operating system + +```console +sudo adduser tyk +``` +Log in to your Database +```console +sudo -u tyk psql +``` +Update the user β€œtyk” to have a password +```console +ALTER ROLE tyk with PASSWORD '123456'; +``` +Create a DB (my example is tyk_analytics) +```console +sudo -u tyk createdb tyk_analytics +``` +**Option 2: Install MongoDB** +
+Check the MongoDB supported [versions](/planning-for-production/database-settings#mongodb-sizing-guidelines). Follow the steps provided by [MongoDB](https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-red-hat/) to install it.
+
+Optionally initialize the database and enable automatic start:
+```console
+# Optionally ensure that MongoDB will start following a system reboot
+sudo systemctl enable mongod
+# start MongoDB server
+sudo systemctl start mongod
+```
+
+#### Using Ansible
+You can install Tyk on RHEL or CentOS using our YUM repositories. Follow the guides and tutorials in this section to have Tyk up and running in no time.
+
+The order is to install Tyk Dashboard, then Tyk Pump and then Tyk Gateway for a full stack.
+
+- [Dashboard](#install-dashboard)
+- [Pump](#install-pump)
+- [Gateway](#install-gateway)
+
+
+
+
+ For a production environment, we recommend that the Tyk Gateway, Tyk Dashboard and Tyk Pump are installed on separate machines. If installing multiple Tyk Gateways, you should install each on a separate machine. See [Planning for Production](/planning-for-production) for more details.
+
+
+
+**Supported Distributions**
+
+| Distribution | Version | Supported |
+| --------- | :---------: | :---------: |
+| CentOS | 7 | βœ… |
+| RHEL | 8 | βœ… |
+| RHEL | 7 | βœ… |
+
+
+**Requirements**
+
+[Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - required for running the commands below.
+
+**Getting Started**
+
+1. Clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository
+
+    ```console
+    $ git clone https://github.com/TykTechnologies/tyk-ansible
+    ```
+
+2. `cd` into the directory
+
+    ```console
+    $ cd tyk-ansible
+    ```
+
+3. Run initialisation script to initialise environment
+
+    ```console
+    $ sh scripts/init.sh
+    ```
+
+4. Modify `hosts.yml` file to update ssh variables to your server(s). 
You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install the following: + + - Redis + - MongoDB or PostgreSQL + - Tyk Dashboard + - Tyk Gateway + - Tyk Pump + + ```console + $ ansible-playbook playbook.yaml -t tyk-pro -t redis -t `mongodb` or `pgsql` + ``` + + You can choose to not install Redis, MongoDB or PostgreSQL by removing the `-t redis` or `-t mongodb` or `-t pgsql` However Redis and MongoDB or PostgreSQL are a requirement and need to be installed for the Tyk Pro installation to run. + +**Variables** + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| redis.host | | Redis server host if different than the hosts url | +| redis.port | `6379` | Redis server listening port | +| redis.pass | | Redis server password | +| redis.enableCluster | `false` | Enable if redis is running in cluster mode | +| redis.storage.database | `0` | Redis server database | +| redis.tls | `false` | Enable if redis connection is secured with SSL | +| mongo.host | | MongoDB server host if different than the hosts url | +| mongo.port | `27017` | MongoDB server listening port | +| mongo.tls | `false` | Enable if mongo connection is secured with SSL | +| pgsql.host | | PGSQL server host if different than the hosts url | +| pgsql.port | `5432` | PGSQL server listening port | +| pgsql.tls | `false` | Enable if pgsql connection is secured with SSL | +| dash.license | | Dashboard license| +| dash.service.host | | Dashboard server host if different than the hosts url | +| dash.service.port | `3000` | Dashboard server listening port | +| dash.service.proto | `http` | Dashboard server protocol | +| dash.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.service.host | | Gateway server host if 
different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | +| gateway.rpc.connString | | Use this setting to add the URL for your MDCB or load balancer host | +| gateway.rpc.useSSL | `true` | Set this option to `true` to use an SSL RPC connection| +| gateway.rpc.sslInsecureSkipVerify | `true` | Set this option to `true` to allow the certificate validation (certificate chain and hostname) to be skipped. This can be useful if you use a self-signed certificate | +| gateway.rpc.rpcKey | | Your organization ID to connect to the MDCB installation | +| gateway.rpc.apiKey | | This the API key of a user used to authenticate and authorize the Gateway’s access through MDCB. The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised. The suggested security settings are read for Real-time notifications and the remaining options set to deny | +| gateway.rpc.groupId | | This is the `zone` that this instance inhabits, e.g. the cluster/data-center the Gateway lives in. The group ID must be the same across all the Gateways of a data-center/cluster which are also sharing the same Redis instance. This ID should also be unique per cluster (otherwise another Gateway cluster can pick up your keyspace events and your cluster will get zero updates). 
|
+
+- `vars/redis.yaml`
+
+| Variable | Default | Comments |
+| --------- | :---------: | --------- |
+| redis_bind_interface | `0.0.0.0` | Binding address of Redis |
+
+Read more about Redis configuration [here](https://github.com/geerlingguy/ansible-role-redis).
+
+- `vars/mongodb.yaml`
+
+| Variable | Default | Comments |
+| --------- | :---------: | --------- |
+| bind_ip | `0.0.0.0` | Binding address of MongoDB |
+| mongodb_version | `4.4` | MongoDB version |
+
+Read more about MongoDB configuration [here](https://github.com/ansible-collections/community.mongodb).
+
+- `vars/pgsql.yaml`
+
+| Variable | Default | Comments |
+| --------- | :---------: | --------- |
+| postgresql_databases[] | `[]` | Array of DBs to be created |
+| postgresql_databases[].name | `tyk_analytics` | Database name |
+| postgresql_users[] | `[]` | Array of users to be created |
+| postgresql_users[`0`].name | `default` | User name |
+| postgresql_users[`0`].password | `topsecretpassword` | User password |
+| postgresql_global_config_options[] | `[]` | Postgres service config options |
+| postgresql_global_config_options[`1`].option | `listen_addresses` | Listen address binding for the service |
+| postgresql_global_config_options[`1`].value | `*` | Default value to listen to all addresses |
+| postgresql_hba_entries[] | `[]` | Host based authentication list |
+| postgresql_hba_entries[`4`].type | `host` | Entry type |
+| postgresql_hba_entries[`4`].database | `tyk_analytics` | Which database this entry will give access to |
+| postgresql_hba_entries[`4`].user | `default` | Which users gain access via this entry |
+| postgresql_hba_entries[`4`].address | `0.0.0.0/0` | Which addresses gain access via this entry |
+| postgresql_hba_entries[`4`].auth_method | `md5` | Which authentication method to use for the users |
+
+Read more about PostgreSQL configuration [here](https://github.com/geerlingguy/ansible-role-postgresql). 
+ + +### Install Dashboard + +#### Using Shell + +Tyk has its own signed RPMs in a YUM repository hosted by the kind folks at [packagecloud.io](https://packagecloud.io/tyk/tyk-dashboard/install#manual-rpm), which makes it easy, safe and secure to install a trusted distribution of the Tyk Gateway stack. + +This configuration should also work (with some tweaks) for CentOS. + +**Prerequisites** + +* Ensure port `3000` is open: This is used by the Dashboard to provide the GUI and the Classic Developer Portal. +* Follow the steps provided in this link [Getting started on Red Hat (RHEL / CentOS)](#install-tyk-on-redhat-rhel-centos) to install and configure Tyk dependencies. + +1. **Set up YUM Repositories** + + First, install two package management utilities `yum-utils` and a file downloading tool `wget`: + ```bash + sudo yum install yum-utils wget + ``` + Then install Python: + ```bash + sudo yum install python3 + ``` + +2. **Configure and Install the Tyk Dashboard** + + Create a file named `/etc/yum.repos.d/tyk_tyk-dashboard.repo` that contains the repository configuration settings for YUM repositories `tyk_tyk-dashboard` and `tyk_tyk-dashboard-source` used to download packages from the specified URLs, including GPG key verification and SSL settings, on a Linux system. 
+ + Make sure to replace `el` and `8` in the config below with your Linux distribution and version: + ```bash + [tyk_tyk-dashboard] + name=tyk_tyk-dashboard + baseurl=https://packagecloud.io/tyk/tyk-dashboard/el/8/$basearch + repo_gpgcheck=1 + gpgcheck=0 + enabled=1 + gpgkey=https://packagecloud.io/tyk/tyk-dashboard/gpgkey + sslverify=1 + sslcacert=/etc/pki/tls/certs/ca-bundle.crt + metadata_expire=300 + + [tyk_tyk-dashboard-source] + name=tyk_tyk-dashboard-source + baseurl=https://packagecloud.io/tyk/tyk-dashboard/el/8/SRPMS + repo_gpgcheck=1 + gpgcheck=0 + enabled=1 + gpgkey=https://packagecloud.io/tyk/tyk-dashboard/gpgkey + sslverify=1 + sslcacert=/etc/pki/tls/certs/ca-bundle.crt + metadata_expire=300 + ``` + + We'll need to update the YUM package manager's local cache, enabling only the `tyk_tyk-dashboard` repository while disabling all other repositories `--disablerepo='*' --enablerepo='tyk_tyk-dashboard'`, and confirm all prompts `-y`. + ```bash + sudo yum -q makecache -y --disablerepo='*' --enablerepo='tyk_tyk-dashboard' + ``` + + Install Tyk dashboard: + ```bash + sudo yum install -y tyk-dashboard + ``` + +3. **Confirm Redis and MongoDB or PostgreSQL are running** + + Start Redis since it is always required by the Dashboard. + ```bash + sudo service redis start + ``` + Then start either MongoDB or PostgreSQL depending on which one you are using. + ```bash + sudo systemctl start mongod + ``` + ```bash + sudo systemctl start postgresql-13 + ``` + +4. **Configure Tyk Dashboard** + +We can set the Dashboard up with a similar setup command, the script below will get the Dashboard set up for the local instance. +Make sure to use the actual DNS hostname or the public IP of your instance as the last parameter. 
+ + + + +```bash +sudo /opt/tyk-dashboard/install/setup.sh --listenport=3000 --redishost= --redisport=6379 --mongo=mongodb://:/tyk_analytics --tyk_api_hostname=$HOSTNAME --tyk_node_hostname=http://localhost --tyk_node_port=8080 --portal_root=/portal --domain="XXX.XXX.XXX.XXX" +``` + +Replace ``, `` and `` with your own values to run this script. + + + + +```bash +sudo /opt/tyk-dashboard/install/setup.sh --listenport=3000 --redishost= --redisport=6379 --storage=postgres --connection_string=postgresql://:@:/ --tyk_api_hostname=$HOSTNAME --tyk_node_hostname=http://localhost --tyk_node_port=8080 --portal_root=/portal --domain="XXX.XXX.XXX.XXX" +``` + +Replace ``,``,``, ``, `` and `` with your own values to run the script. + + + + +With these values you are configuring the following: + +* `--listenport=3000`: Tyk Dashboard (and Portal) to listen on port `3000`. +* `--redishost=`: Tyk Dashboard should use the local Redis instance. +* `--redisport=6379`: The Tyk Dashboard should use the default port. +* `--domain="XXX.XXX.XXX.XXX"`: Bind the Dashboard to the IP or DNS hostname of this instance (required). +* `--mongo=mongodb://:/tyk_analytics`: Use the local MongoDB (should always be the same as the Gateway). +* `--storage=postgres`: In case your preferred storage database is PostgreSQL, use storage type "postgres" and specify connection string. +* `--connection_string=postgresql://:@:/`: Use the PostgreSQL instance provided in the connection string (should always be the same as the gateway). +* `--tyk_api_hostname=$HOSTNAME`: The Tyk Dashboard has no idea what hostname has been given to Tyk, so we need to tell it, in this instance we are just using the local HOSTNAME env variable, but you could set this to the public-hostname/IP of the instance. +* `--tyk_node_hostname=http://localhost`: The Tyk Dashboard needs to see a Tyk node in order to create new tokens, so we need to tell it where we can find one, in this case, use the one installed locally.
+* `--tyk_node_port=8080`: Tell the Dashboard that the Tyk node it should communicate with is on port 8080. +* `--portal_root=/portal`: We want the Portal to be shown on /portal of whichever domain we set for the Portal. + +5. **Start Tyk Dashboard** + + ```bash + sudo service tyk-dashboard start + ``` + + + + To check the logs from the deployment run: + ```bash + sudo journalctl -u tyk-dashboard + ``` + + + + Notice how we haven't actually started the gateway yet, because this is a Dashboard install, we need to enter a license first. + + + +When using PostgreSQL you may receive the error: `"failed SASL auth (FATAL: password authentication failed for user...)"`, follow these steps to address the issue: +1. Open the terminal or command prompt on your PostgreSQL server. +2. Navigate to the location of the `pg_hba.conf` file. This file is typically located at `/var/lib/pgsql/13/data/pg_hba.conf`. +3. Open the `pg_hba.conf` file using a text manipulation tool. +4. In the `pg_hba.conf` file, locate the entry corresponding to the user encountering the authentication error. This entry might resemble the following: +```bash +host all all / scram-sha-256 +``` +5. In the entry, find the METHOD column. It currently has the value scram-sha-256. +6. Replace scram-sha-256 with md5, so the modified entry looks like this: +```bash +host all all / md5 +``` +7. Save the changes you made to the `pg_hba.conf` file. +8. Restart the PostgreSQL service to apply the modifications: +```bash +sudo systemctl restart postgresql-13 +``` + + + +6. **Enter Dashboard license** + + Add your license in `/var/opt/tyk-dashboard/tyk_analytics.conf` in the `license` field. + + If all is going well, you will be taken to a Dashboard setup screen - we'll get to that soon. + +7. 
**Restart the Dashboard process** + + Because we've just entered a license via the UI, we need to make sure that these changes get picked up, so to make sure things run smoothly, we restart the Dashboard process (you only need to do this once) and (if you have it installed) then start the gateway: + ```bash + sudo service tyk-dashboard restart + ``` + +8. **Go to the Tyk Dashboard URL** + + Go to the following URL to access to the Tyk Dashboard: + + ```bash + 127.0.0.1:3000 + ``` + + You should get to the Tyk Dashboard Setup screen: + + Tyk Dashboard Bootstrap Screen + +9. **Create your Organization and Default User** + + You need to enter the following: + + * Your **Organization Name** + * Your **Organization Slug** + * Your User **Email Address** + * Your User **First and Last Name** + * A **Password** for your User + * **Re-enter** your user **Password** + + + + + + For a password, we recommend a combination of alphanumeric characters, with both upper and lower case letters. + + + + + Click **Bootstrap** to save the details. + +10. **Login to the Dashboard** + + You can now log in to the Tyk Dashboard from `127.0.0.1:3000`, using the username and password created in the Dashboard Setup screen. + + **Configure your Developer Portal** + + To set up your [Developer Portal](/portal/overview/intro) follow our Self-Managed [tutorial on publishing an API to the Portal Catalog](/getting-started/tutorials/publish-api). + +#### Using Ansible + +**Getting Started** + +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```bash +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). 
You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install `tyk-dashboard` + +```bash +$ ansible-playbook playbook.yaml -t tyk-dashboard +``` + +**Supported Distributions** + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Amazon Linux | 2 | βœ… | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | + +**Variables** + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| dash.license | | Dashboard license| +| dash.service.host | | Dashboard server host if different than the hosts url | +| dash.service.port | `3000` | Dashboard server listening port | +| dash.service.proto | `http` | Dashboard server protocol | +| dash.service.tls | `false` | Set to `true` to enable SSL connections | + + + +### Install Pump + +#### Using Shell + +Tyk has its own signed RPMs in a YUM repository hosted by the kind folks at [packagecloud.io](https://packagecloud.io), which makes it easy, safe and secure to install a trusted distribution of the Tyk Gateway stack. + +This tutorial will run on an [Amazon AWS](http://aws.amazon.com) *Red Hat Enterprise Linux 7.1* instance. We will install Tyk Pump with all dependencies stored locally. + +We're installing on a `t2.micro` because this is a tutorial, you'll need more RAM and more cores for better performance. + +This configuration should also work (with some tweaks) for CentOS.
+ +**Prerequisites** + +We are assuming that Redis and either MongoDB or SQL are installed (these are installed as part of the Tyk Gateway and Dashboard installation guides) + +**Step 1: Set up YUM Repositories** + +First, we need to install some software that allows us to use signed packages: +```bash +sudo yum install pygpgme yum-utils wget +``` + +Next, we need to set up the various repository configurations for Tyk and MongoDB: + +Create a file named `/etc/yum.repos.d/tyk_tyk-pump.repo` that contains the repository configuration below: + +Make sure to replace `el` and `7` in the config below with your Linux distribution and version: +```bash +[tyk_tyk-pump] +name=tyk_tyk-pump +baseurl=https://packagecloud.io/tyk/tyk-pump/el/7/$basearch +repo_gpgcheck=1 +gpgcheck=1 +enabled=1 +gpgkey=https://keyserver.tyk.io/tyk.io.rpm.signing.key.2020 + https://packagecloud.io/tyk/tyk-pump/gpgkey +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt +metadata_expire=300 +``` + +Finally we'll need to update our local cache, so run: +```bash +sudo yum -q makecache -y --disablerepo='*' --enablerepo='tyk_tyk-pump' +``` + +**Step 2: Install Packages** + +We're ready to go, you can now install the relevant packages using yum: +```bash +sudo yum install -y tyk-pump +``` + +**(You may be asked to accept the GPG key for our repos and when the package installs, hit yes to continue.)** +
+ +**Step 3: Configure Tyk Pump** + +If you don't complete this step, you won't see any analytics in your Dashboard, so to enable the analytics service, we need to ensure Tyk Pump is running and configured properly. + + +**Configure Tyk Pump for MongoDB** +
+ + +You need to replace `` for `--redishost=`, and ``, `` for `--mongo=mongodb://:/` with your own values to run this script. + + + +```bash +sudo /opt/tyk-pump/install/setup.sh --redishost= --redisport=6379 --mongo=mongodb://:/tyk_analytics +``` +**Configure Tyk Pump for SQL** +
+ + +You need to replace `` for `--redishost=`, and ``,``, ``, ``, `` for `--postgres="host= port= user= password= dbname="` with your own values to run this script. + + +```bash +sudo /opt/tyk-pump/install/setup.sh --redishost= --redisport=6379 --postgres="host= port= user= password= dbname=" +``` + + +**Step 4: Start Tyk Pump** + +```bash +sudo service tyk-pump start +``` + +That's it, the Pump should now be up and running. + +You can verify if Tyk Pump is running and working by accessing the logs: +```bash +sudo journalctl -u tyk-pump +``` + + + +#### Using Ansible + +**Getting Started** + +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```bash +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install `tyk-pump` + +```bash +$ ansible-playbook playbook.yaml -t tyk-pump +``` + +**Supported Distributions** + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Amazon Linux | 2 | βœ… | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | + + + +### Install Gateway + +#### Using Shell + +Tyk has its own signed RPMs in a YUM repository hosted by the kind folks at [packagecloud.io](https://packagecloud.io/tyk/tyk-dashboard/install#manual-rpm), which makes it easy, safe and secure to install a trusted distribution of the Tyk Gateway stack. + +This tutorial will run on an [Amazon AWS](http://aws.amazon.com) *Red Hat Enterprise Linux 7.1* instance. We will install Tyk Gateway with all dependencies stored locally.
+ +We're installing on a `t2.micro` because this is a tutorial, you'll need more RAM and more cores for better performance. + +This configuration should also work (with some tweaks) for CentOS. + +**Prerequisites** + +* Ensure port `8080` is open: this is used in this guide for Gateway traffic (API traffic to be proxied) +* EPEL (Extra Packages for Enterprise Linux) is a free, community based repository project from Fedora which provides high quality add-on software packages for Linux distribution including RHEL, CentOS, and Scientific Linux. EPEL isn’t a part of RHEL/CentOS but it is designed for major Linux distributions. In our case we need it for Redis. Install EPEL using the instructions here. + +**Step 1: Set up YUM Repositories** + +First, we need to install some software that allows us to use signed packages: +```bash +sudo yum install pygpgme yum-utils wget +``` + +Next, we need to set up the various repository configurations for Tyk and MongoDB: + +**Step 2: Create Tyk Gateway Repository Configuration** + +Create a file named `/etc/yum.repos.d/tyk_tyk-gateway.repo` that contains the repository configuration below https://packagecloud.io/tyk/tyk-gateway/install#manual-rpm: +```bash +[tyk_tyk-gateway] +name=tyk_tyk-gateway +baseurl=https://packagecloud.io/tyk/tyk-gateway/el/7/$basearch +repo_gpgcheck=1 +gpgcheck=1 +enabled=1 +gpgkey=https://keyserver.tyk.io/tyk.io.rpm.signing.key.2020 + https://packagecloud.io/tyk/tyk-gateway/gpgkey +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt +metadata_expire=300 +``` + +**Step 3: Install Packages** + +We're ready to go, you can now install the relevant packages using yum: +```bash +sudo yum install -y redis tyk-gateway +``` + +*(you may be asked to accept the GPG key for our two repos and when the package installs, hit yes to continue)* + +**Step 4: Start Redis** + +In many cases Redis will not be running, so let's start those: +```bash +sudo service redis start +``` + +When Tyk is finished installing, it will 
have installed some init scripts, but it will not be running yet. The next step will be to setup the Gateway – thankfully this can be done with three very simple commands. + + + +#### Using Ansible + +**Requirements** + +[Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - required for running the commands below. Use the **Shell** tab for instructions to install Tyk from a shell. + +**Getting Started** + +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```bash +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install `tyk-gateway` + +```bash +$ ansible-playbook playbook.yaml -t `tyk-gateway-pro` or `tyk-gateway-hybrid` +``` + +**Supported Distributions** + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Amazon Linux | 2 | βœ… | +| CentOS | 8 | βœ… | +| CentOS | 7 | βœ… | +| RHEL | 8 | βœ… | +| RHEL | 7 | βœ… | + +**Variables** + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| gateway.service.host | | Gateway server host if different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags
to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | +| gateway.rpc.connString | | Use this setting to add the URL for your MDCB or load balancer host | +| gateway.rpc.useSSL | `true` | Set this option to `true` to use an SSL RPC connection| +| gateway.rpc.sslInsecureSkipVerify | `true` | Set this option to `true` to allow the certificate validation (certificate chain and hostname) to be skipped. This can be useful if you use a self-signed certificate | +| gateway.rpc.rpcKey | | Your organization ID to connect to the MDCB installation | +| gateway.rpc.apiKey | | This is the API key of a user used to authenticate and authorize the Gateway’s access through MDCB. The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised. The suggested security settings are read for Real-time notifications and the remaining options set to deny | +| gateway.rpc.groupId | | This is the `zone` that this instance inhabits, e.g. the cluster/data-center the Gateway lives in. The group ID must be the same across all the Gateways of a data-center/cluster which are also sharing the same Redis instance. This ID should also be unique per cluster (otherwise another Gateway cluster can pick up your keyspace events and your cluster will get zero updates). | +##### Configure Tyk Gateway with the Dashboard + +**Prerequisites** + +This configuration assumes that you have already installed Tyk Dashboard, and have decided on the domain names for your Dashboard and your Portal. **They must be different**. For testing purposes, it is easiest to add hosts entries to your (and your servers) `/etc/hosts` file.
+ +**Set up Tyk Gateway with Quick Start Script** + +You can set up the core settings for Tyk Gateway with a single setup script, however for more involved deployments, you will want to provide your own configuration file. + + + +You need to replace `` for `--redishost=`with your own value to run this script. + + + +```bash +sudo /opt/tyk-gateway/install/setup.sh --dashboard=1 --listenport=8080 --redishost= --redisport=6379 +``` + +What we've done here is told the setup script that: + +* `--dashboard=1`: We want to use the Dashboard, since Tyk Gateway gets all it's API Definitions from the Dashboard service, as of v2.3 Tyk will auto-detect the location of the dashboard, we only need to specify that we should use this mode. +* `--listenport=8080`: Tyk should listen on port 8080 for API traffic. +* `--redishost=`: Use Redis on the hostname: localhost. +* `--redisport=6379`: Use the default Redis port. + +**Starting Tyk** + +The Tyk Gateway can be started now that it is configured. Use this command to start the Tyk Gateway: +```bash +sudo service tyk-gateway start +``` + +**Pro Tip: Domains with Tyk Gateway** + +Tyk Gateway has full domain support built-in, you can: + +* Set Tyk to listen only on a specific domain for all API traffic. +* Set an API to listen on a specific domain (e.g. api1.com, api2.com). +* Split APIs over a domain using a path (e.g. api.com/api1, api.com/api2, moreapis.com/api1, moreapis.com/api2 etc). +* If you have set a hostname for the Gateway, then all non-domain-bound APIs will be on this hostname + the `listen_path`. + +## Install Tyk on Debian or Ubuntu + +### Install Database + +#### Using Shell + +**Requirements** + +Before installing the Tyk components in the order below, you need to first install Redis and MongoDB/SQL. + +**Getting Started** + + + +**Install MongoDB 4.0** + +You should follow the [online tutorial for installing MongoDb](https://docs.mongodb.com/v4.0/tutorial/install-mongodb-on-ubuntu/). We will be using version 4.0. 
As part of the Mongo installation you need to perform the following: + +1. Import the public key +2. Create a list file +3. Reload the package database +4. Install the MongoDB packages +5. Start MongoDB +6. Check the `mongod` service is running + + + + +**Install SQL** + +You should follow the [online tutorial for installing PostgreSQL](https://www.postgresql.org/download/linux/ubuntu/). We will be using version 13. As part of the PostgreSQL installation you need to perform the following: + +1. Create the file repository configuration +2. Import the repository signing key +3. Update the package lists +4. Install the PostgreSQL packages +5. Start PostgreSQL +6. Check the `postgresql` service is running + +See [SQL configuration](/planning-for-production/database-settings#postgresql) for details on installing SQL in a production environment. + + + +**Install Redis** + +```console +$ sudo apt-get install -y redis-server +``` + +**Install Tyk Pro on Ubuntu** + +Installing Tyk on Ubuntu is very straightforward using our APT repositories, follow the guides and tutorials in this section to have Tyk up and running in no time. + +The suggested order would be to install Tyk Dashboard, then Tyk Pump and then Tyk Gateway for a full stack. + +- [Dashboard](/tyk-self-managed/install#Debian-Ubuntu-install-dashboard) +- [Pump](/tyk-self-managed/install#Debian-Ubuntu-install-pump) +- [Gateway](/tyk-self-managed/install#debian-ubuntu-install-gateway) + + + + + For a production environment, we recommend that the Gateway, Dashboard and Pump are installed on separate machines. If installing multiple Gateways, you should install each on a separate machine. See [Planning for Production](/planning-for-production) For more details. + + + + +#### Using Ansible + +**Requirements** + + +[Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - required for running the commands below. Use the **Shell** tab for instructions to install Tyk from a shell. 
+ +**Getting Started** + +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repository + +```console +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```console +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```console +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install the following: +- Redis +- MongoDB or PostgreSQL +- Tyk Dashboard +- Tyk Gateway +- Tyk Pump + +```console +$ ansible-playbook playbook.yaml -t tyk-pro -t redis -t `mongodb` or `pgsql` +``` + +You can choose to not install Redis, MongoDB or PostgreSQL by removing the `-t redis` or `-t mongodb` or `-t pgsql`. However Redis and MongoDB or PostgreSQL are a requirement and need to be installed for the Tyk Pro installation to run.
+ +**Supported Distributions** + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Debian | 10 | βœ… | +| Debian | 9 | βœ… | +| Ubuntu | 21 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +**Variables** + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| redis.host | | Redis server host if different than the hosts url | +| redis.port | `6379` | Redis server listening port | +| redis.pass | | Redis server password | +| redis.enableCluster | `false` | Enable if redis is running in cluster mode | +| redis.storage.database | `0` | Redis server database | +| redis.tls | `false` | Enable if redis connection is secured with SSL | +| mongo.host | | MongoDB server host if different than the hosts url | +| mongo.port | `27017` | MongoDB server listening port | +| mongo.tls | `false` | Enable if mongo connection is secured with SSL | +| pgsql.host | | PGSQL server host if different than the hosts url | +| pgsql.port | `5432` | PGSQL server listening port | +| pgsql.tls | `false` | Enable if pgsql connection is secured with SSL | +| dash.license | | Dashboard license| +| dash.service.host | | Dashboard server host if different than the hosts url | +| dash.service.port | `3000` | Dashboard server listening port | +| dash.service.proto | `http` | Dashboard server protocol | +| dash.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.service.host | | Gateway server host if different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| 
gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | +| gateway.rpc.connString | | Use this setting to add the URL for your MDCB or load balancer host | +| gateway.rpc.useSSL | `true` | Set this option to `true` to use an SSL RPC connection| +| gateway.rpc.sslInsecureSkipVerify | `true` | Set this option to `true` to allow the certificate validation (certificate chain and hostname) to be skipped. This can be useful if you use a self-signed certificate | +| gateway.rpc.rpcKey | | Your organization ID to connect to the MDCB installation | +| gateway.rpc.apiKey | | This is the API key of a user used to authenticate and authorize the Gateway’s access through MDCB. The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised. The suggested security settings are read for Real-time notifications and the remaining options set to deny | +| gateway.rpc.groupId | | This is the `zone` that this instance inhabits, e.g. the cluster/data-center the Gateway lives in. The group ID must be the same across all the Gateways of a data-center/cluster which are also sharing the same Redis instance. This ID should also be unique per cluster (otherwise another Gateway cluster can pick up your keyspace events and your cluster will get zero updates). | + +- `vars/redis.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| redis_bind_interface | `0.0.0.0` | Binding address of Redis | + +Read more about Redis configuration [here](https://github.com/geerlingguy/ansible-role-redis).
+ +- `vars/mongodb.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| bind_ip | `0.0.0.0` | Binding address of MongoDB | +| mongodb_version | `4.4` | MongoDB version | + +Read more about MongoDB configuration [here](https://github.com/ansible-collections/community.mongodb). + +- `vars/pgsql.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| postgresql_databases[] | `[]` | Array of DBs to be created | +| postgresql_databases[].name | `tyk_analytics` | Database name | +| postgresql_users[] | `[]` | Array of users to be created | +| postgresql_users[`0`].name | `default` | User name | +| postgresql_users[`0`].password | `topsecretpassword` | User password | +| postgresql_global_config_options[] | `[]` | Postgres service config options | +| postgresql_global_config_options[`1`].option | `listen_addresses` | Listen address binding for the service | +| postgresql_global_config_options[`1`].value | `*` | Default value to listen to all addresses | +| postgresql_hba_entries[] | `[]` | Host based authentication list| +| postgresql_hba_entries[`4`].type | `host` | Entry type | +| postgresql_hba_entries[`4`].database | `tyk_analytics` | Which database this entry will give access to | +| postgresql_hba_entries[`4`].user | `default` | Which users gain access from this entry | +| postgresql_hba_entries[`4`].address | `0.0.0.0/0` | Which addresses gain access from this entry | +| postgresql_hba_entries[`4`].auth_method | `md5` | Which authentication method to use for the users | + +Read more about PostgreSQL configuration [here](https://github.com/geerlingguy/ansible-role-postgresql). + +### Install Dashboard + + +#### Using Shell + +Tyk has its own APT repositories hosted by the kind folks at [packagecloud.io](https://packagecloud.io/tyk), which makes it easy, safe and secure to install a trusted distribution of the Tyk Gateway stack.
+ +This tutorial has been tested on Ubuntu 16.04 & 18.04 with few if any modifications. We will install the Tyk Dashboard with all dependencies locally. + +**Prerequisites** +- Have MongoDB/SQL and Redis installed - follow the guide for [installing databases on Debian/Ubuntu](#install-tyk-on-debian-or-ubuntu). +- Ensure port `3000` is available. This is used by the Tyk Dashboard to provide the GUI and the Developer Portal. + +**Step 1: Set up our APT Repositories** + +First, add our GPG key which signs our binaries: + +```bash +curl -L https://packagecloud.io/tyk/tyk-dashboard/gpgkey | sudo apt-key add - +``` + +Run update: + +```bash +sudo apt-get update +``` + +Since our repositories are installed via HTTPS, you will need to make sure APT supports this: + +```bash +sudo apt-get install -y apt-transport-https +``` + +Now lets add the required repos and update again (notice the `-a` flag in the second Tyk commands - this is important!): + +```bash +echo "deb https://packagecloud.io/tyk/tyk-dashboard/ubuntu/ bionic main" | sudo tee /etc/apt/sources.list.d/tyk_tyk-dashboard.list + +echo "deb-src https://packagecloud.io/tyk/tyk-dashboard/ubuntu/ bionic main" | sudo tee -a /etc/apt/sources.list.d/tyk_tyk-dashboard.list + +sudo apt-get update +``` + + + +`bionic` is the code name for Ubuntu 18.04. Please substitute it with your particular [ubuntu release](https://releases.ubuntu.com/), e.g. `focal`. + + + +**What we've done here is:** + +- Added the Tyk Dashboard repository +- Updated our package list + +**Step 2: Install the Tyk Dashboard** + +We're now ready to install the Tyk Dashboard. To install run: + +```bash +sudo apt-get install -y tyk-dashboard +``` + +What we've done here is instructed `apt-get` to install the Tyk Dashboard without prompting. Wait for the downloads to complete. + +When the Tyk Dashboard has finished installing, it will have installed some `init` scripts, but it will not be running yet. 
The next step will be to setup each application - thankfully this can be done with three very simple commands. + +**Verify the origin key (optional)** + +Debian packages are signed with the repository keys. These keys are verified at the time of fetching the package and this is taken care of by the `apt` infrastructure. These keys are controlled by PackageCloud, our repository provider. For an additional guarantee, it is possible to verify that the package was indeed created by Tyk by verifying the `origin` certificate that is attached to the package. + +First, you have to fetch Tyk's signing key and import it. + +```bash +wget https://keyserver.tyk.io/tyk.io.deb.signing.key +gpg --import tyk.io.deb.signing.key +``` + +Then, you have to either: +- sign the key with your ultimately trusted key +- trust this key ultimately + +The downloaded package will be available in `/var/cache/apt/archives`. Assuming you found the file `tyk-gateway-2.9.4_amd64.deb` there, you can verify the origin signature. + +```bash +gpg --verify tyk-gateway-2.9.4_amd64.deb +gpg: Signature made Wed 04 Mar 2020 03:05:00 IST +gpg: using RSA key F3781522A858A2C43D3BC997CA041CD1466FA2F8 +gpg: Good signature from "Team Tyk (package signing) " [ultimate] +``` + +##### **Configure Tyk Dashboard** + +**Prerequisites for MongoDB** + +You need to ensure the MongoDB and Redis services are running before proceeding. + + + +You need to replace `` for `--redishost=`, and `` for `--mongo=mongodb:///` with your own values to run this script. + + + + +You can set your Tyk Dashboard up with a helper setup command script.
This will get the Dashboard set up for the local instance: + +```bash +sudo /opt/tyk-dashboard/install/setup.sh --listenport=3000 --redishost= --redisport=6379 --mongo=mongodb:///tyk_analytics --tyk_api_hostname=$HOSTNAME --tyk_node_hostname=http://localhost --tyk_node_port=8080 --portal_root=/portal --domain="XXX.XXX.XXX.XXX" +``` + + + +Make sure to use the actual DNS hostname or the public IP of your instance as the last parameter. + + + + +What we have done here is: + +- `--listenport=3000`: Told the Tyk Dashboard (and Portal) to listen on port 3000. +- `--redishost=`: The Tyk Dashboard should use the local Redis instance. +- `--redisport=6379`: The Tyk Dashboard should use the default port. +- `--domain="XXX.XXX.XXX.XXX"`: Bind the Tyk Dashboard to the IP or DNS hostname of this instance (required). +- `--mongo=mongodb:///tyk_analytics`: Use the local MongoDB (should always be the same as the gateway). +- `--tyk_api_hostname=$HOSTNAME`: The Tyk Dashboard has no idea what hostname has been given to Tyk, so we need to tell it, in this instance we are just using the local HOSTNAME env variable, but you could set this to the public-hostname/IP of the instance. +- `--tyk_node_hostname=http://localhost`: The Tyk Dashboard needs to see a Tyk node in order to create new tokens, so we need to tell it where we can find one, in this case, use the one installed locally. +- `--tyk_node_port=8080`: Tell the Tyk Dashboard that the Tyk node it should communicate with is on port 8080. +- `--portal_root=/portal`: We want the portal to be shown on `/portal` of whichever domain we set for the portal. + +**Prerequisites for SQL** + +You need to ensure the PostgreSQL and Redis services are running before proceeding. + + + +You need to replace `` for `--redishost=`, and ``, ``, ``, ``, `` for `--connection_string="host= port= user= password= dbname="` with your own values to run this script. + + + + +You can set the Tyk Dashboard up with a helper setup command script. 
This will get the Dashboard set up for the local instance: + +```bash +sudo /opt/tyk-dashboard/install/setup.sh --listenport=3000 --redishost= --redisport=6379 --storage=postgres --connection_string="host= port= user= password= dbname=" --tyk_api_hostname=$HOSTNAME --tyk_node_hostname=http://localhost --tyk_node_port=8080 --portal_root=/portal --domain="XXX.XXX.XXX.XXX" +``` + + + +Make sure to use the actual DNS hostname or the public IP of your instance as the last parameter. + + + + +What we have done here is: + +- `--listenport=3000`: Told the Tyk Dashboard (and Portal) to listen on port 3000. +- `--redishost=`: The Tyk Dashboard should use the local Redis instance. +- `--redisport=6379`: The Tyk Dashboard should use the default port. +- `--domain="XXX.XXX.XXX.XXX"`: Bind the dashboard to the IP or DNS hostname of this instance (required). +- `--storage=postgres`: Use storage type postgres. +- `--connection_string="host= port= user= password= dbname="`: Use the postgres instance provided in the connection string(should always be the same as the gateway). +- `--tyk_api_hostname=$HOSTNAME`: The Tyk Dashboard has no idea what hostname has been given to Tyk, so we need to tell it, in this instance we are just using the local HOSTNAME env variable, but you could set this to the public-hostname/IP of the instance. +- `--tyk_node_hostname=http://localhost`: The Tyk Dashboard needs to see a Tyk node in order to create new tokens, so we need to tell it where we can find one, in this case, use the one installed locally. +- `--tyk_node_port=8080`: Tell the dashboard that the Tyk node it should communicate with is on port 8080. +- `--portal_root=/portal`: We want the portal to be shown on `/portal` of whichever domain we set for the portal. + + +**Step 1: Enter your Tyk Dashboard License** + +Add your license in `/opt/tyk-dashboard/tyk_analytics.conf` in the `license` field. 
+ +**Step 2: Start the Tyk Dashboard** + +Start the dashboard service, and ensure it will start automatically on system boot. + +```bash +sudo systemctl start tyk-dashboard +sudo systemctl enable tyk-dashboard +``` + +**Step 3: Install your Tyk Gateway** + +Follow the [Gateway installation instructions](#using-shell-7) to connect to your Dashboard instance before you continue on to step 4. + +**Step 4: Bootstrap the Tyk Dashboard with an initial User and Organization** + +Go to: + +```bash +127.0.0.1:3000 +``` + +You should get to the Tyk Dashboard Setup screen: + +Tyk Dashboard Bootstrap Screen + +**Step 5 - Create your Organization and Default User** + +You need to enter the following: + +- Your **Organization Name** +- Your **Organization Slug** +- Your User **Email Address** +- Your User **First and Last Name** +- A **Password** for your User +- **Re-enter** your user **Password** + + + + + For a password, we recommend a combination of alphanumeric characters, with both upper and lower case + letters. + + + +Click **Bootstrap** to save the details. + +**Step 6 - Login to the Tyk Dashboard** + +You can now log in to the Tyk Dashboard from `127.0.0.1:3000`, using the username and password created in the Dashboard Setup screen. + +##### **Configure your Developer Portal** + +To set up your [Developer Portal](/portal/overview/intro) follow our Self-Managed [tutorial on publishing an API to the Portal Catalog](/getting-started/tutorials/publish-api). + +#### Using Ansible + +**Getting Started** + +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repositry + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```bash +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). 
You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install `tyk-dashboard` + +```bash +$ ansible-playbook playbook.yaml -t tyk-dashboard +``` + +**Supported Distributions** + +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Debian | 10 | βœ… | +| Debian | 9 | βœ… | +| Ubuntu | 21 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +**Variables** + +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| dash.license | | Dashboard license| +| dash.service.host | | Dashboard server host if different than the hosts url | +| dash.service.port | `3000` | Dashboard server listening port | +| dash.service.proto | `http` | Dashboard server protocol | +| dash.service.tls | `false` | Set to `true` to enable SSL connections | + +### Install Pump + + +#### Using Shell + +This tutorial has been tested Ubuntu 16.04 & 18.04 with few if any modifications. + +**Prerequisites** + +- You have installed Redis and either MongoDB or SQL. +- You have installed the Tyk Dashboard. 
+ +**Step 1: Set up our APT repositories** + +First, add our GPG key which signs our binaries: + +```bash +curl -L https://packagecloud.io/tyk/tyk-pump/gpgkey | sudo apt-key add - +``` + +Run update: + +```bash +sudo apt-get update +``` + +Since our repositories are installed via HTTPS, you will need to make sure APT supports this: + +```bash +sudo apt-get install -y apt-transport-https +``` + +Now lets add the required repos and update again (notice the `-a` flag in the second Tyk commands - this is important!): + +```bash +echo "deb https://packagecloud.io/tyk/tyk-pump/ubuntu/ bionic main" | sudo tee /etc/apt/sources.list.d/tyk_tyk-pump.list + +echo "deb-src https://packagecloud.io/tyk/tyk-pump/ubuntu/ bionic main" | sudo tee -a /etc/apt/sources.list.d/tyk_tyk-pump.list + +sudo apt-get update +``` + + + +`bionic` is the code name for Ubuntu 18.04. Please substitute it with your particular [ubuntu release](https://releases.ubuntu.com/), e.g. `focal`. + + + +**What you've done here is:** + +- Added the Tyk Pump repository +- Updated our package list + +**Step 2: Install the Tyk Pump** + +You're now ready to install the Tyk Pump. To install it, run: + +```bash +sudo apt-get install -y tyk-pump +``` + +What you've done here is instructed `apt-get` to install Tyk Pump without prompting. Wait for the downloads to complete. + +When Tyk Pump has finished installing, it will have installed some `init` scripts, but it will not be running yet. The next step will be to setup each application using three very simple commands. + +**Verify the origin key (optional)** + +Debian packages are signed with the repository keys. These keys are verified at the time of fetching the package and is taken care of by the `apt` infrastructure. These keys are controlled by PackageCloud, our repository provider. For an additional guarantee, it is possible to verify that the package was indeed created by Tyk by verifying the `origin` certificate that is attached to the package. 
+ +First, you have to fetch Tyk's signing key and import it. + +```bash +wget https://keyserver.tyk.io/tyk.io.deb.signing.key +gpg --import tyk.io.deb.signing.key +``` + +Then, you have to either, +- sign the key with your ultimately trusted key +- trust this key ultimately + +The downloaded package will be available in `/var/cache/apt/archives`. Assuming you found the file `tyk-gateway-2.9.3_amd64.deb` there, you can verify the origin signature. + +```bash +gpg --verify d.deb +gpg: Signature made Wed 04 Mar 2020 03:05:00 IST +gpg: using RSA key F3781522A858A2C43D3BC997CA041CD1466FA2F8 +gpg: Good signature from "Team Tyk (package signing) " [ultimate] +``` + +**Step 3: Configure Tyk Pump** + +If you don't complete this step, you won't see any analytics in your Dashboard, so to enable the analytics service, we need to ensure Tyk Pump is running and configured properly. + +**Option 1: Configure Tyk Pump for MongoDB** +
+ + +You need to replace `` for `--redishost=`, and `` for `--mongo=mongodb:///` with your own values to run this script. + + + +```bash +sudo /opt/tyk-pump/install/setup.sh --redishost= --redisport=6379 --mongo=mongodb:///tyk_analytics +``` + +**Option 2: Configure Tyk Pump for SQL** +
+ + +You need to replace `` for `--redishost=`, and ``,``, ``, ``, `` for `--postgres="host= port= user= password= dbname="` with your own values to run this script. + + + +```bash +sudo /opt/tyk-pump/install/setup.sh --redishost= --redisport=6379 --postgres="host= port= user= password= dbname=" +``` + +**Step 4: Start Tyk Pump** + +```bash +sudo service tyk-pump start +sudo service tyk-pump enable +``` + +You can verify if Tyk Pump is running and working by tailing the log file: + +```bash +sudo tail -f /var/log/upstart/tyk-pump.log +``` + +#### Using Ansible + +**Install Tyk Pump Through Ansible** + +**Getting Started** +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repositry + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```bash +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. Run ansible-playbook to install `tyk-pump` + +```bash +$ ansible-playbook playbook.yaml -t tyk-pump +``` + +**Supported Distributions** +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Debian | 10 | βœ… | +| Debian | 9 | βœ… | +| Ubuntu | 21 | βœ… | +| Ubuntu | 20 | βœ… | +| Ubuntu | 18 | βœ… | +| Ubuntu | 16 | βœ… | + +### Install Gateway + + +#### Using Shell + +Tyk has it's own APT repositories hosted by the kind folks at [packagecloud.io][1], which makes it easy, safe and secure to install a trusted distribution of the Tyk Gateway stack. + +This tutorial has been tested on Ubuntu 16.04 & 18.04 with few if any modifications. + +Please note however, that should you wish to write your own plugins in Python, we currently have a Python version dependency of 3.4. 
Python-3.4 ships with Ubuntu 14.04, however you may need to explicitly install it on newer Ubuntu Operating System releases. + +**Prerequisites** + +* Ensure port `8080` is available. This is used in this guide for Gateway traffic (API traffic to be proxied). +* You have MongoDB and Redis installed. +* You have installed firstly the Tyk Dashboard, then the Tyk Pump. + +**Step 1: Set up our APT Repositories** + +First, add our GPG key which signs our binaries: + +```bash +curl -L https://packagecloud.io/tyk/tyk-gateway/gpgkey | sudo apt-key add - +``` + +Run update: +```bash +sudo apt-get update +``` + +Since our repositories are installed via HTTPS, you will need to make sure APT supports this: +```bash +sudo apt-get install -y apt-transport-https +``` + +Create a file `/etc/apt/sources.list.d/tyk_tyk-gateway.list` with the following contents: +```bash +deb https://packagecloud.io/tyk/tyk-gateway/ubuntu/ bionic main +deb-src https://packagecloud.io/tyk/tyk-gateway/ubuntu/ bionic main +``` + + +`bionic` is the code name for Ubuntu 18.04. Please substitute it with your particular [ubuntu release](https://releases.ubuntu.com/), e.g. `focal`. + + + +Now you can refresh the list of packages with: +```bash +sudo apt-get update +``` + +**What we've done here is:** + +* Added the Tyk Gateway repository +* Updated our package list + +**Step 2: Install the Tyk Gateway** + +We're now ready to install the Tyk Gateway. To install it, run: + +```bash +sudo apt-get install -y tyk-gateway +``` +What we've done here is instructed apt-get to install the Tyk Gateway without prompting, wait for the downloads to complete. + +When Tyk has finished installing, it will have installed some init scripts, but will not be running yet. The next step will be to set up the Gateway - thankfully this can be done with three very simple commands, however it does depend on whether you are configuring Tyk Gateway for use with the Dashboard or without (the Community Edition). 
+ +**Verify the origin key (optional)** + +Debian packages are signed with the repository keys. These keys are verified at the time of fetching the package and is taken care of by the `apt` infrastructure. These keys are controlled by PackageCloud, our repository provider. For an additional guarantee, it is possible to verify that the package was indeed created by Tyk by verifying the `origin` certificate that is attached to the package. + +First, you have to fetch Tyk's signing key and import it. + +```bash +wget https://keyserver.tyk.io/tyk.io.deb.signing.key +gpg --import tyk.io.deb.signing.key +``` + +Then, you have to either, +- sign the key with your ultimately trusted key +- trust this key ultimately + +The downloaded package will be available in `/var/cache/apt/archives`. Assuming you found the file `tyk-gateway-2.9.4_amd64.deb` there, you can verify the origin signature. + +```bash +gpg --verify d.deb +gpg: Signature made Wed 04 Mar 2020 03:05:00 IST +gpg: using RSA key F3781522A858A2C43D3BC997CA041CD1466FA2F8 +gpg: Good signature from "Team Tyk (package signing) " [ultimate] +``` + +**Configure Tyk Gateway with Dashboard** + +**Prerequisites** + +This configuration assumes that you have already installed the Tyk Dashboard, and have decided on the domain names for your Dashboard and your Portal. **They must be different**. For testing purposes, it is easiest to add hosts entries to your (and your servers) `/etc/hosts` file. + +**Set up Tyk** + +You can set up the core settings for Tyk Gateway with a single setup script, however for more involved deployments, you will want to provide your own configuration file. + + + +You need to replace `` for `--redishost=`with your own value to run this script. 
+ + + + +```bash +sudo /opt/tyk-gateway/install/setup.sh --dashboard=1 --listenport=8080 --redishost= --redisport=6379 +``` + +What we've done here is told the setup script that: + +* `--dashboard=1`: We want to use the Dashboard, since Tyk Gateway gets all it's API Definitions from the Dashboard service, as of v2.3 Tyk will auto-detect the location of the dashboard, we only need to specify that we should use this mode. +* `--listenport=8080`: Tyk should listen on port 8080 for API traffic. +* `--redishost=`: Use Redis on your hostname. +* `--redisport=6379`: Use the default Redis port. + +**Starting Tyk** + +The Tyk Gateway can be started now that it is configured. Use this command to start the Tyk Gateway: +```bash +sudo service tyk-gateway start +sudo service tyk-gateway enable +``` + +**Pro Tip: Domains with Tyk Gateway** + +Tyk Gateway has full domain support built-in, you can: + +* Set Tyk to listen only on a specific domain for all API traffic. +* Set an API to listen on a specific domain (e.g. api1.com, api2.com). +* Split APIs over a domain using a path (e.g. api.com/api1, api.com/api2, moreapis.com/api1, moreapis.com/api2 etc). +* If you have set a hostname for the Gateway, then all non-domain-bound APIs will be on this hostname + the `listen_path`. + +[1]: https://packagecloud.io/tyk + + +#### Using Ansible + +**Getting Started** +1. clone the [tyk-ansible](https://github.com/TykTechnologies/tyk-ansible) repositry + +```bash +$ git clone https://github.com/TykTechnologies/tyk-ansible +``` + +2. `cd` into the directory +```.bash +$ cd tyk-ansible +``` + +3. Run initialisation script to initialise environment + +```bash +$ sh scripts/init.sh +``` + +4. Modify `hosts.yml` file to update ssh variables to your server(s). You can learn more about the hosts file [here](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) + +5. 
Run ansible-playbook to install `tyk-gateway` + +```bash +$ ansible-playbook playbook.yaml -t `tyk-gateway-pro` or `tyk-gateway-hybrid` +``` + +**Supported Distributions** +| Distribution | Version | Supported | +| --------- | :---------: | :---------: | +| Debian | 10 | ✅ | +| Debian | 9 | ✅ | +| Ubuntu | 21 | ✅ | +| Ubuntu | 20 | ✅ | +| Ubuntu | 18 | ✅ | +| Ubuntu | 16 | ✅ | + +**Variables** +- `vars/tyk.yaml` + +| Variable | Default | Comments | +| --------- | :---------: | --------- | +| secrets.APISecret | `352d20ee67be67f6340b4c0605b044b7` | API secret | +| secrets.AdminSecret | `12345` | Admin secret | +| gateway.service.host | | Gateway server host if different than the hosts url | +| gateway.service.port | `8080` | Gateway server listening port | +| gateway.service.proto | `http` | Gateway server protocol | +| gateway.service.tls | `false` | Set to `true` to enable SSL connections | +| gateway.sharding.enabled | `false` | Set to `true` to enable filtering (sharding) of APIs | +| gateway.sharding.tags | | The tags to use when filtering (sharding) Tyk Gateway nodes. Tags are processed as OR operations. If you include a non-filter tag (e.g. an identifier such as `node-id-1`, this will become available to your Dashboard analytics) | +| gateway.rpc.connString | | Use this setting to add the URL for your MDCB or load balancer host | +| gateway.rpc.useSSL | `true` | Set this option to `true` to use an SSL RPC connection| +| gateway.rpc.sslInsecureSkipVerify | `true` | Set this option to `true` to allow the certificate validation (certificate chain and hostname) to be skipped. This can be useful if you use a self-signed certificate | +| gateway.rpc.rpcKey | | Your organization ID to connect to the MDCB installation | +| gateway.rpc.apiKey | | This is the API key of a user used to authenticate and authorize the Gateway’s access through MDCB. 
The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised. The suggested security settings are read for Real-time notifications and the remaining options set to deny | +| gateway.rpc.groupId | | This is the `zone` that this instance inhabits, e.g. the cluster/data-center the Gateway lives in. The group ID must be the same across all the Gateways of a data-center/cluster which are also sharing the same Redis instance. This ID should also be unique per cluster (otherwise another Gateway cluster can pick up your keyspace events and your cluster will get zero updates). | + + diff --git a/tyk-stack.mdx b/tyk-stack.mdx new file mode 100644 index 000000000..b69cc862b --- /dev/null +++ b/tyk-stack.mdx @@ -0,0 +1,25 @@ +--- +title: "Tyk Stack" +description: "Overview of Tyk Stack components, both open-source and closed-source." +order: 7 +sidebarTitle: "Tyk Stack" +--- + +import OssProductListInclude from '/snippets/oss-product-list-include.mdx'; + +## Tyk Open Source + + + +## Closed Source + +The following Tyk components, created and maintained by the Tyk Team, are proprietary and closed-source: + +* [Tyk Dashboard](/api-management/dashboard-configuration) +* [Tyk Developer Portal](/portal/overview/intro) +* [Tyk Multi Data Center Bridge](/api-management/mdcb#managing-geographically-distributed-gateways-to-minimize-latency-and-protect-data-sovereignty) +* [Universal Data Graph](/api-management/data-graph#overview) +* [Tyk Operator](/api-management/automations/operator#what-is-tyk-operator) +* [Tyk Sync](/api-management/automations/sync) + +If you plan to deploy and use the above components On-premise, license keys are required. 
diff --git a/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/approve-requests.mdx b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/approve-requests.mdx new file mode 100644 index 000000000..3c9c5eda7 --- /dev/null +++ b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/approve-requests.mdx @@ -0,0 +1,93 @@ +--- +title: "Managing API Access Requests" +description: "How to provision API Access Requests in Tyk Developer Portal" +keywords: "Developer Portal, Tyk, Dynamic Client Registration" +sidebarTitle: "Manage Access Requests" +--- + +## Introduction + +API Access Requests are formal requests from API Consumers to access specific API Products and Plans through the Developer Portal. These requests initiate the workflow for granting, or provisioning, API access to users. + +### Understanding the Provisioning Request Workflow + +When API Consumers discover APIs in your Catalog that they need access to, they initiate an API Access Request through the Live Portal. This request: + +- Identifies the specific API Product and subscription Plan they want to access +- Specifies which Developer App should receive the access credentials +- Creates an auditable record of the access request + +Depending on your configuration, these requests can be processed [automatically](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/approve-requests#automatic-approval-workflow) or require [manual approval](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/approve-requests#manual-approval-workflow). + + + + +## Requesting Access to an API Product + +**API Consumers** can request access to a combination of API Product and Plan from the Catalog(s) presented to them in the Live Portal. + +1. From the **Catalogues** page, choose the API Product of interest and select **More info** +2. On the API Product detail page, decide which of the available Plans to subscribe to and select **Access with this Plan** +3. The combination of API Product and Plan will be added to their *cart* +4. 
Repeat steps 1-3 for all the API Products you want to access +5. Go to the **Cart** using the icon in the top right of the screen +6. Review the selected API Products and Plan + - all API Products must use the same Authentication method + - a single subscription Plan will be selected +7. Select which Developer App to associate with the request + - if approved, the Access Credentials will be stored in this Developer App + - you can create a new App within the Cart, or select an existing App +8. Select **Submit request** + + +## Manual Approval Workflow + +The manual approval workflow provides API Owners with oversight of all API access. When an API Consumer completes an [access request](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/approve-requests#requesting-access-to-an-api-product), API Owners receive notification of pending request via [email](/product-stack/tyk-enterprise-developer-portal/getting-started/setup-email-notifications) and should then: + +1. Navigate to the **API Consumers > Access Requests** page in the Admin Portal +2. Review the request + - User name + - Developer App + - Requested API Products + - Selected subscription Plan +3. Approve or reject the request from the three dot menu + - If approved, access is provisioned with credentials issued to the specified Developer App + - If rejected, access will not be granted + - API Consumer receives notification of the decision via email + + +## Automatic Approval Workflow + +For trusted users or specific API Products, you can enable automatic approval in the [subscription Plan](/portal/api-plans#auto-approve-provisioning-requests). + +To configure automatic approval, the API Owner should: + +1. Navigate to the **Plans** page in the Admin Portal +2. Select or create the API Plan that should be automatically approved +3. Set the **Auto approve access request** checkbox + Auto Approve API provisioning requests +4. 
Select **Save changes** + +When an API Consumer requests access using this plan, the request will be approved immediately and access credentials provisioned to the Developer App. + + + +Despite automatic approval, a record of the request is maintained in the **API Consumers > Access requests** page in the Admin Portal. + + + + +## Notification of Decision + +The Dev Portal sends notification to the API Consumer when their request is approved or rejected. + +If the [email service](/product-stack/tyk-enterprise-developer-portal/getting-started/setup-email-notifications) is configured, then: + +- When a request is approved: + - The system sends an approval notification email to the user + - The email uses the template "approve" with a configurable subject + - The notification includes details about the approved access +- When a request is rejected: + - The system sends a rejection notification email to the user + - The email uses the template "reject" with a configurable subject + diff --git a/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/configuring-custom-rate-limit-keys.mdx b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/configuring-custom-rate-limit-keys.mdx new file mode 100644 index 000000000..58c56297f --- /dev/null +++ b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/configuring-custom-rate-limit-keys.mdx @@ -0,0 +1,55 @@ +--- +title: "Configuring Custom Rate Limit Keys in Developer Portal" +description: "How to configure custom rate limit keys in Tyk Developer Portal" +keywords: "Developer Portal, Tyk, Rate Limit" +sidebarTitle: "Advanced Rate Limits" +--- + +## Introduction + +The Tyk Enterprise Developer Portal supports custom rate limiting patterns that allow you to apply rate limits based on entities other than just credentials, such as per application, per developer, or per organization. 
This is particularly useful for B2B scenarios where API quotas need to be shared across multiple developers and applications within an organization. + +For detailed information about custom rate limiting concepts and configuration, see the [Custom Rate Limiting](/api-management/rate-limit#custom-rate-limiting) section in the main Rate Limiting documentation. + +**Prerequisites** + +This capability works with [Tyk 5.3.0](/developer-support/release-notes/dashboard#530-release-notes) or higher. + +## Configuring Custom Rate Limit Keys in the Portal + + + +If you are using Tyk Developer Portal version 1.13.0 or later, you can configure the custom rate limit keys directly from the Developer Portal in the Advanced settings (optional) collapsible section of the Plan's view (by Credentials metadata). +Add Plan Advanced Settings + + + +For general configuration of custom rate limit keys in policies, refer to the [Custom Rate Limiting](/api-management/rate-limit#custom-rate-limiting) documentation. + +## Using Custom Rate Limit Keys with the Portal + +The Tyk Enterprise Developer Portal facilitates the configuration of various rate limiting options based on a business model for API Products published in the portal. + +To achieve this, the portal, by default, populates the following attributes in the credential metadata, which can be used as part of a custom rate limit key: +- **ApplicationID**: The ID of the application to which the credential belongs. +- **DeveloperID**: The ID of the developer who created the credential. +- **OrganisationID**: The ID of the organization to which the developer belongs. + +Additionally, it's possible to attach [custom attribute values](/portal/customization/user-model#add-custom-attributes-to-the-user-model) defined in a developer profile as metadata fields to credentials. 
 + +When a credential is provisioned by the portal, all the fields described above are added as metadata values to the credential, making them valid options for configuring the rate limit key: + +Credential's metadata + +This approach allows the portal to seamlessly apply rate limits based on any combination of the aforementioned fields and other custom metadata objects defined in policies used for plans or products. This is in addition to credentials. + +--- + + + +**Tyk Enterprise Developer Portal** + +If you are interested in getting access, contact us at [support@tyk.io](mailto:support@tyk.io) + + + + diff --git a/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration.mdx b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration.mdx new file mode 100644 index 000000000..b95452d69 --- /dev/null +++ b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration.mdx @@ -0,0 +1,454 @@ +--- +title: "Dynamic Client Registration" +description: "How to configure Dynamic Client Registration in Tyk developer portal" +keywords: "Developer Portal, Tyk, Dynamic Client Registration" +sidebarTitle: "OAuth 2.0 Dynamic Client Registration" +--- + +## Introduction + +**Why OAuth2.0 is important** + +OAuth 2.0 is a crucial security mechanism for both public and internal APIs, as it provides a secure and standardized way to authenticate and authorize access to protected resources. It enables granular access control and revocation of access when necessary without exposing sensitive login credentials. In short, OAuth 2.0 offers a secure and flexible approach to managing access to APIs. + + +Implementing an OAuth2.0 provider can be a complex process that involves several technical and security considerations. As such, many API providers choose to use specialized identity providers instead of implementing an OAuth2.0 provider themselves. 
+ + +By using specialized identity providers, API providers can leverage the provider's expertise and infrastructure to manage access to APIs and ensure the security of the authentication process. This also allows API providers to focus on their core business logic and reduce the burden of managing user identities themselves. + +**How does Tyk help** + +Tyk offers a standard and reliable way to work with identity providers through the Dynamic Client Registration protocol (DCR), which is an [Internet Engineering Task Force](https://www.ietf.org/) protocol that establishes standards for dynamically registering clients with authorization servers. + +Tyk Enterprise Developer portal allows API providers to set up a connection with identity providers that support DCR so that API Consumers can use the OAuth2.0 credentials issued by the identity provider to access APIs exposed on the portal. + + +
 + +## Prerequisites +Before getting started with configuring the portal, it's required to configure your Identity provider and the Dashboard beforehand. + +### Create an initial access token +Before setting up Tyk Enterprise Developer Portal to work with DCR, you need to configure the identity provider. Please refer to the guides for popular providers to create the initial access token for DCR: +* [Gluu](https://gluu.org/docs/gluu-server/4.0/admin-guide/openid-connect#dynamic-client-registration) +* [Curity](https://curity.io/docs/idsvr/latest/token-service-admin-guide/dcr.html) +* [Keycloak](https://github.com/keycloak/keycloak/blob/25.0.6/docs/documentation/securing_apps/topics/client-registration.adoc) +* [Okta](https://developer.okta.com/docs/reference/api/oauth-clients/) + + + + + Whilst many providers require initial access tokens, they are optional. Please refer to your provider documentation to confirm if required. + + + +### Create OAuth2.0 scopes to enforce access control and rate limit + +Tyk uses OAuth2.0 scope to enforce access control and rate limit for API Products. Therefore, creating at least two scopes for an API Product and plan is required. + +The below example demonstrates how to achieve that with Curity, Keycloak and Okta in the tabs below. + + + + +1. **Navigate to Profiles → Token Service → Scopes** + + Click `+ New` to create a new scope. + + Navigate to the Scopes menu + +2. **Give the new scope a name** + + Name the scope **product_payments** and click `Create`. Repeat to create another scope and give it the name **free_plan**. + + Name the scope product_payments + + Name the scope free_plan + + The created scopes can now be assigned to OAuth Clients configured, including DCR clients when they are registered. + +**Unauthenticated DCR** + +The Curity Identity Server by default requires a nonce token with a `dcr` scope to authenticate the DCR endpoint. It obtains this via normal OAuth flows. 
It is, however, not possible to define an OAuth client_id and secret in Tyk to obtain such a token. A workaround is to disable authentication of the DCR endpoint in the Curity Identity Server by setting it to use no-authentication. + +
+
+
+
+**Use in secure environments only**
+
+When configuring the DCR endpoint in the Curity Identity Server to use `no-authentication`, ensure that the communication between Tyk and the Curity Identity Server is secured so that it is only accessible to Tyk.
+
+To configure this in the AdminUI of the Curity Identity Server, go to Profiles → Token Service → Dynamic Registration → scroll to the Non-templatized section and set Authentication Method to `no-authentication`.
+
+
+
+
+
+
+1. **Navigate to the Client scopes menu item**
+
+ Navigate to the Client scopes menu item
+
+2. **Create a scope for an API Product**
+
+ Create a scope for an API Product
+
+3. **Create a scope for a plan**
+
+ Create a scope for a plan
+
+
+
+
+ When using Keycloak, ensure that you set the type of the scope to be `Optional`. Default scopes are applied automatically, while optional scopes can be requested by clients on a case-by-case basis to extend the permissions granted by the user. In recent versions of Keycloak this should appear as a dropdown menu option, as shown in the images above. In older releases of Keycloak this may need to be set explicitly in a separate tab, as shown in the image below.
+
+
+
+Client Scope Assigned Type
+
+
+
+
+
+1. **Create an auth server or use the `Default` authorization server**
+
+ Go to Security → API, Edit one of the auth servers and navigate to `Scopes`
+
+ Add or Edit oauth servers in okta
+
+2. **Create a scope for an API Product**
+
+ Create a scope for an API Product
+
+3. **Create a scope for a plan**
+
+ Create a scope for a plan
+
+
+
+ +## Create Tyk policies for an API Product and plan + + + +You can skip this step if you are using Tyk Developer Portal version 1.13.0 or later. +Go directly to [Configure Tyk Enterprise Developer Portal to work with an identity provider](#configure-tyk-enterprise-developer-portal-to-work-with-an-identity-provider). + + + +Navigate to the Tyk Dashboard and create two policies: one for a plan and one for an API Product. Both policies should include only the APIs with JWT authentication that you want to bundle as an API Product. + +1. **Create a policy for an API product.** + + Create a policy for a product + +2. **Create a policy for a plan.** + + Create a policy for a plan + + +### Create the No Operation policy and API + + + +You can skip this step if you are using Tyk Developer Portal version 1.13.0 or later. +Go directly to [Configure Tyk Enterprise Developer Portal to work with an identity provider](#configure-tyk-enterprise-developer-portal-to-work-with-an-identity-provider). + + + +Tyk requires any API that uses the scope to policy mapping to have [a default policy](/basic-config-and-security/security/authentication-authorization/json-web-tokens ). Access rights and rate limits defined in the default policy take priority over other policies, including policies for the API Product and plan. + +To avoid that, you need to create the No Operation API and policy that won't grant access to the APIs included in the API Product but will satisfy the requirement for a default policy. + +1. **Create the No Operation API.** + + Navigate to the `APIs` menu in the Tyk Dashboard: + Navigate to the API menu in the Tyk Dashboard + + + Create a new HTTP API: + Create the No Operation API + + + Save it: + Save the No Operation API + +
+ +2. **Create the No Operation policy.** + + Navigate to the `Policies` menu in the Tyk Dashboard: + Navigate to the policies menu + + Create a new policy and select the No Operation API in the `Add API Access Rights` section: + Create the No Operation policy + + Configure the No Operation policy and save it: + Save the No Operation policy + +### Configure scope to policy mapping + + + +You can skip this step if you are using Tyk Developer Portal version 1.13.0 or later. +Go directly to [Configure Tyk Enterprise Developer Portal to work with an identity provider](#configure-tyk-enterprise-developer-portal-to-work-with-an-identity-provider). + + + +To enforce policies for the API Product and plan, you need to configure the scope to policy mapping for each API included in the API Product. +To achieve that, perform the following steps for each API included in the API Product. + +1. Navigate to the API. + + Navigate to the API + +2. Select the required JWT signing method. In this example, we use RSA. Leave the `Public key` and `pol` fields blank, they will be filled automatically by the Enterprise portal. + + Select signing method for the API + +3. Select the No Operation policy as the default policy for this API. + + Select the default policy for the API + +4. Enable scope to policy mapping and specify the value of the JWT claim used to extract scopes in the `Scope name` field (the default value is "scope"). + + Enable scope to policy mapping + + +5. Add a scope to policy mapping for the product scope. Type the product scope in the `Claim field` and select the product policy. + + Add scope to policy mapping for the product scope + +6. Add a scope to policy mapping for the plan scope. Type the plan scope in the `Claim field` and select the plan policy, then save the API. + + Add scope to policy mapping for the plan scope + +## Configure Tyk Enterprise Developer Portal to work with an identity provider + +Set up the portal to work with your IdP. 
+ + +### Configure the App registration settings + +In the portal, navigate to the `OAuth2.0 Providers` menu section. In that section, you need to configure the connection settings to the IdP and define one or more types (configurations) of OAuth 2.0 clients. For instance, you can define two types of OAuth 2.0 clients: +* A confidential client that supports the Client credential grant type for backend integrations; +* A web client that supports the Authorization code grant type for integration with web applications that can't keep the client secret confidential. + +Each configuration of OAuth 2.0 client could be associated with one or multiple API Products so that when an API Consumer requests access to an API Product, they can select a client type that is more suitable for their use case. + + +#### Specify connection setting to your IdP + +To connect the portal to the IdP, you need to specify the following settings: +* OIDC well-known configuration URL. +* Initial access token. + +First of all, select your IdP from the `Identity provider` dropdown list. Different IdPs have slightly different approaches to DCR implementation, so the portal will use a driver that is specific to your IdP. If your IdP is not present in the dropdown list, select the `Other` option. In that case, the portal will use the most standard implementation of the DCR driver, which implements the DCR flow as defined in the RFC. + +Then you need to specify the connection settings: [the initial access token and the well-known endpoint](#create-an-initial-access-token). If your Identity Provider uses certificates that are not trusted, the portal will not work with it by default. To bypass certificate verification, you can select the `SSL secure skip verify` checkbox. + +The below example demonstrates how to achieve that with Curity, Keycloak and Okta in the tabs below. 
+
+
+
+
+
+Specify connection setting
+
+Set a **Name**, set the **Identity Provider Type** to `Other`, and the **OIDC well-known configuration URL** appropriately.
+
+
+
+
+Specify connection setting to the IdP
+
+
+
+
+
+Specify connection setting to the IdP
+
+**OIDC URL**: `{your-domain.com}/oauth2/default/.well-known/openid-configuration`
+
+**Registration Access Token**: To obtain a token, go to Okta Admin Console → Security → API → Tokens → Create New Token
+
+
+
+
+
+#### Create client configurations
+Once the connection settings are specified, you need to create one or multiple types of clients. You might have multiple types of clients that are suitable for different use cases, such as backend integration or web applications.
+
+You need at least one type of client for the DCR flow to work. To add the first client type, scroll down to the `Client Types` section and click on the `Add client type` button.
+
+To configure a client type, you need to specify the following settings:
+* **Client type display name.** This name will be displayed to API consumers when they check out API products. Try to make it descriptive and short, so it's easier for API consumers to understand.
+* **Description.** A more verbose description of a client type can be provided in this field. By default, we do not display this on the checkout page, but you can customize the respective template and make the description visible to API consumers. Please refer to [the customization section](/portal/customization#) for guidance.
+* **Allowed response_types.** Response types associated with this type of client as per [the OIDC spec](https://openid.net/specs/openid-connect-core-1_0-17.html).
+* **Allowed grant_types.** Grant types that this type of client will support as per [the OIDC spec](https://openid.net/specs/openid-connect-core-1_0-17.html).
+* **Token endpoint auth methods.** The token endpoint that will be used by this type of client as per [the OIDC spec](https://openid.net/specs/openid-connect-core-1_0-17.html).
+* Additionally, there is a field for Okta: **Okta application type** which defines which type of Okta client should be created. Ignored for all other IdPs.
+
+Please note that your IdP might override some of these settings based on its configuration.
+
+The below example demonstrates how to achieve that with Curity, Keycloak and Okta in the tabs below. After configuring a client type, scroll to the top of the page to save it by clicking on the `SAVE CHANGES` button.
+
+
+
+
+
+Configure a client type
+
+
+
+
+
+Configure a client type
+
+
+
+
+
+Configure a client type
+
+**For Okta Client Credentials**: allowed response types MUST be token only
+
+
+
+
+
+## Configure API Products and plans for the DCR flow
+Once the App registration settings are configured, it is time for the final step: to configure the API Products and plans to work with the DCR flow.
+
+### Configure API Products for the DCR flow
+
+To configure API Products to work with the DCR flow, you need to:
+* Enable the DCR flow for the products you want to work with the DCR flow.
+* Associate each product with one or multiple types of clients that were created in the previous step.
+* Specify scopes for this API Product. Note the portal uses the scope to policy mapping to enforce access control to API Products, so there should be at least one scope.
+
+To achieve this, navigate to the `API Products` menu and select the particular API product you want to use for the DCR flow. Next, go to the `App registration configs` section and enable the `Enable dynamic client registration` checkbox.
+
+After that, specify the scope for this API product.
You should have at least one scope that was created in [the Prerequisites for getting started](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration#prerequisites). If you need to specify more than one scope, you can separate them with spaces. + +Finally, select one or multiple types of clients that were created in [the Create client configurations](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration#create-client-configurations) section of this guide to associate them with that product. + + + + + +Configure an API Product to work with the DCR flow + + + + + +Configure an API Product to work with the DCR flow + + + + + +Configure an API Product to work with the DCR flow + + + + + +
+ + + +From version 1.13.0, you can complete the DCR configuration for a product under the `Dynamic Client Registration` tab in the product's view. Scope to policy mapping for the selected API/s will be automatically configured using the scope defined in the `Scopes` field. +Add DCR settings + + + +#### Configure plans for the DCR flow + +The last step is to configure the plans you want to use with the DCR flow. To do this, go to the portal's `Plans` menu section and specify the OAuth2.0 scope to use with each plan. You should have at least one scope that was created in [the Prerequisites for getting started](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/api-access/dynamic-client-registration#prerequisites). If you need to specify more than one scope, you can separate them with spaces. +Configure a plan to work with the DCR flow + +
+
+
+
+From version 1.13.0, you can complete the DCR configuration for a plan under the `Advanced settings (optional)` collapsible section in the plan's view. Scope to policy mapping for the plan will be automatically configured using the scope defined in the `Scopes` field.
+Add Plan Advanced Settings
+
+
+
+## Test the DCR flow
+To test the DCR flow, you need to perform the following actions:
+- Request access to the API product and plan you have selected for the DCR flow as a developer.
+- Approve the access request as an admin.
+- As a developer, copy the access credentials and obtain an access token.
+- As a developer, make an API call to verify the flow's functionality.
+
+### Request access to the API Product
+To request access to the DCR enabled API Product:
+- Log in as a developer and navigate to the catalog page.
+- Select the DCR enabled API Product and add it to the shopping cart.
+- Navigate to the checkout page.
+- On the checkout page, select a plan to use with that product, select an existing application, or create a new one. If you plan to build an application that uses the Authorization code grant type, you also need to specify the redirect URI of your application in the `Redirect URLs` field. If you have multiple redirect URIs, you can separate them with commas.
+- Select a client type which is more suitable for your use case in the `Select a client type` section.
+- Finally, select the applicable type of client and click on the `Submit request` button.
+Request access to the DCR enabled product
+
+### Approve the access request
+To approve the access request, navigate to the `Access requests` menu in the portal, select the access request and approve it by clicking on the `Approve` button.
+Approve DCR access request
+
+
+
+When approving an access request, if the Plan scope is not already present in the API Product's scope mappings the Portal will append it in the scope-to-policy mapping declared in the API definition, mapping it to the Id of the Tyk Dashboard consumption policy that relates to the Plan. This will ensure that when the JWT is presented to Tyk, the Plan will be applied to the session.
+
+
+
+### Obtain an access token
+Once the access request is approved, the developer should receive an email informing them of the approval. Please refer to [the email customization section](/portal/customization/email-notifications) if you wish to change the email template.
+
+As a developer, navigate to the `My Dashboard` section in the developer portal, select the application, and copy the OAuth 2.0 credentials.
+
+Copy the OAuth2.0 credentials
+
+Then use the credentials you have copied to obtain an access token. Make sure to include the scopes that are used to enforce access to the API product and plan. Otherwise, the gateway will not authorize the request. Here's an example of how to achieve that with `curl`:
+```curl
+curl --location --request POST 'http://localhost:9999/realms/DCR/protocol/openid-connect/token' \
+--header 'Authorization: Basic N2M2NGM2ZTQtM2I0Ny00NTMyLWFlMWEtODM1ZTMyMWY2ZjlkOjNwZGlJSXVxd004Ykp0M0toV0tLZHFIRkZMWkN3THQ0' \
+--header 'Content-Type: application/x-www-form-urlencoded' \
+--data-urlencode 'scope=product_payments free_plan' \
+--data-urlencode 'grant_type=client_credentials'
+```
+Since in this example we use the client_secret_basic token endpoint authentication method, the credentials must be supplied as a Base64-encoded string: `{client_id}:{client_secret}`.
+ +As a result, you should receive a JWT access token containing the required scopes: +An example of a JWT + +### Make an API Call +Finally, use the access token to make an API call and test the flow functionality: +```curl +curl --location --request GET 'http://localhost:8080/payment-api/get' \ +--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJUR1ZQd25MWlduaWpNc2taU3lHeHFtYnFDNVlIcW9QUUJYZE4xTmJCRDZjIn0.eyJleHAiOjE2Nzg0NDA2ODksImlhdCI6MTY3ODQ0MDM4OSwianRpIjoiMGYwNTdlYjItODQ5My00ZmM2LTllMzQtZTk0OWUzYWQ2MmI2IiwiaXNzIjoiaHR0cDovL2xvY2FsaG9zdDo5OTk5L3JlYWxtcy9EQ1IiLCJzdWIiOiJlNGE3YmFkNy04ZDA4LTQxOTAtODc1Ni1mNTU1ZWQ3Y2JhZjciLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiI3YzY0YzZlNC0zYjQ3LTQ1MzItYWUxYS04MzVlMzIxZjZmOWQiLCJzY29wZSI6ImZyZWVfcGxhbiBwcm9kdWN0X3BheW1lbnRzIiwiY2xpZW50SWQiOiI3YzY0YzZlNC0zYjQ3LTQ1MzItYWUxYS04MzVlMzIxZjZmOWQiLCJjbGllbnRIb3N0IjoiMTcyLjE3LjAuMSIsImNsaWVudEFkZHJlc3MiOiIxNzIuMTcuMC4xIn0.WGp9UIqE7CjFhHdaM64b0G2HGP4adaDg3dgc0YVCV9rTDYmri32Djku7PcLiDKyNLCvlQXUm_O2YmwMCLLUHKPGlRmBMG2y-79-T8z5V-qBATbE6uzwPh38p-SYIIDBUZtlMEhnVp049ZqNolUW-n2uB4CTRb0kDosdRnqhiMUFpe-ORwnZB-4BHGRlwWKyjc5Da6CvVczM1a_c5akqurGMFaX9DC81SS-zMXXpQPDpAkvUJBfLYDHEvXWH8JISqYv7ZQSAbOyE4b-EkVAesyHIMDCQ_pzf5Yp2ivM0dOufN9kdG2w_9ToMqJieVyQILJPowEakmEealbNUFQvc5FA' +``` + +You should receive the following response: +```{.json} +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Authorization": "Bearer 
eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJUR1ZQd25MWlduaWpNc2taU3lHeHFtYnFDNVlIcW9QUUJYZE4xTmJCRDZjIn0.eyJleHAiOjE2Nzg0NDA2ODksImlhdCI6MTY3ODQ0MDM4OSwianRpIjoiMGYwNTdlYjItODQ5My00ZmM2LTllMzQtZTk0OWUzYWQ2MmI2IiwiaXNzIjoiaHR0cDovL2xvY2FsaG9zdDo5OTk5L3JlYWxtcy9EQ1IiLCJzdWIiOiJlNGE3YmFkNy04ZDA4LTQxOTAtODc1Ni1mNTU1ZWQ3Y2JhZjciLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiI3YzY0YzZlNC0zYjQ3LTQ1MzItYWUxYS04MzVlMzIxZjZmOWQiLCJzY29wZSI6ImZyZWVfcGxhbiBwcm9kdWN0X3BheW1lbnRzIiwiY2xpZW50SWQiOiI3YzY0YzZlNC0zYjQ3LTQ1MzItYWUxYS04MzVlMzIxZjZmOWQiLCJjbGllbnRIb3N0IjoiMTcyLjE3LjAuMSIsImNsaWVudEFkZHJlc3MiOiIxNzIuMTcuMC4xIn0.WGp9UIqE7CjFhHdaM64b0G2HGP4adaDg3dgc0YVCV9rTDYmri32Djku7PcLiDKyNLCvlQXUm_O2YmwMCLLUHKPGlRmBMG2y-79-T8z5V-qBATbE6uzwPh38p-SYIIDBUZtlMEhnVp049ZqNolUW-n2uB4CTRb0kDosdRnqhiMUFpe-ORwnZB-4BHGRlwWKyjc5Da6CvVczM1a_c5akqurGMFaX9DC81SS-zMXXpQPDpAkvUJBfLYDHEvXWH8JISqYv7ZQSAbOyE4b-EkVAesyHIMDCQ_pzf5Yp2ivM0dOufN9kdG2w_9ToMqJieVyQILJPowEakmEealbNUFQvc5FA", + "Host": "httpbin.org", + "User-Agent": "curl/7.85.0", + }, + "origin": "XXX.XXX.XXX.XXX, XXX.XXX.XXX.XXX", + "url": "http://httpbin.org/get" +} +``` + diff --git a/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/enable-sso.mdx b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/enable-sso.mdx new file mode 100644 index 000000000..753d26713 --- /dev/null +++ b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/enable-sso.mdx @@ -0,0 +1,647 @@ +--- +title: "Enable single sign on for admin users and developers" +description: "Learn how to enable single sign on for admin users and developers in the Developer Portal." +keywords: "Tyk Developer Portal, Enterprise Portal, Email, Notifications" +sidebarTitle: "Single Sign On" +--- + +## Introduction + +Single sign-on (SSO) enables users to access multiple applications using one set of login credentials, +reducing the burden of password management and improving security. 
SSO is relevant for businesses of all sizes, +streamlining access control and improving user experience. Regardless of your organization's size, implementing SSO can enhance security, +simplify access to enterprise resources, and strengthen user satisfaction. + +In this section, you'll learn how to enable single sign-on for admin users and developers in the Tyk Enterprise Developer portal with 3rd party identity providers (IDPs). + +**Prerequisites** +- A Tyk Enterprise portal installation +- [Supported](https://github.com/TykTechnologies/tyk-identity-broker#using-identity-providers) 3rd party identity provider up and running + +## Portal SSO Configuration Options + +Tyk Enterprise Developer Portal uses the [Tyk Identity Broker (TIB)](/api-management/external-service-integration#what-is-tyk-identity-broker-tib) to integrate Tyk authentication with 3rd party identity providers (IDPs). + +From portal version 1.12.0, TIB is embedded in the portal. With this, you have two options to configure SSO in the portal: + +1. **[Using Embedded TIB](#configuring-sso-with-embedded-tib)**: No need to install it separately. +2. **[Using External TIB](#configuring-sso-with-external-tib)**: If you are using a previous version of the portal, you can still use SSO with TIB installed as a separate application. + +## Configuring SSO with Embedded TIB + +Configuring SSO with Embedded TIB is a four-step process: + +1. **[Enabling Embedded TIB](#enabling-embedded-tib)** +2. **[Understanding UserGroup Mapping](#understanding-usergroup-mapping)** +3. **[Creating TIB Profile](#creating-tib-profile)** +4. **[Testing SSO](#testing-sso)** + +### Enabling Embedded TIB + +To enable Embedded TIB in the portal, add the `PORTAL_TIB_ENABLED` variable to [the portal .env file](/product-stack/tyk-enterprise-developer-portal/deploy/configuration#sample-env-file): +```.ini +PORTAL_TIB_ENABLED=true +``` + + + +The Tyk Enterprise Developer Portal embedded TIB only supports OIDC, LDAP or Social SSO providers. 
+
+
+
+### Understanding UserGroup Mapping
+
+The Tyk Enterprise Developer portal has two audiences:
+
+1. **Developers**:
+
+ Developers created by the SSO flow are portal users that belong to an organization and team/s. If a user group mapping is not specified, they are assigned to the default organization and default team. Developers created by the SSO flow are always assigned the **Consumer Super Admin** role. If part of an organization and a team, this means that the developer is a super admin for that organization. Read more about managing API consumer organizations [here](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations).
+
+2. **Admins**:
+
+ Admins created by the SSO flow are portal users who do not belong to any organization (OrgID is 0) and are assigned the **Provider Admin** role.
+
+TIB uses **user group mapping** to map the user groups from the identity provider to the portal teams within an organization.
+User group mapping
+
+To define the user group mapping for your developer audience, you need to add the UserGroupMapping object to the corresponding [TIB profile](/api-management/external-service-integration#exploring-tib-profiles):
+```yaml
+ "UserGroupMapping": {
+ "{IDP groupA ID}": "{portal teamA ID}",
+ "{IDP groupB ID}": "{portal teamB ID}",
+ ...
+ }
+```
+
+#### Default behaviour of UserGroup Mapping
+
+The `UserGroupMapping` object contains keys that refer to group IDs in your IDP, and the corresponding values are team IDs in the portal.
+When the Tyk Identity Broker authorizes a user, it searches for a key that matches the user's group ID in the IDP.
+If TIB can't find a matching group ID, it logs the user in to the team with an ID equal to `DefaultUserGroupID` in the portal (if `DefaultUserGroupID` is defined).
+We recommend always defining `DefaultUserGroupID` and ensuring it refers to a valid team ID in your portal instance.
The portal will refuse login attempts if `DefaultUserGroupID` is defined but refers to an invalid team ID. + +If no matching group ID is found in the `UserGroupMapping` object and `DefaultUserGroupID` isn't defined, the portal logs in the user to the "Default organization | All users" team with an ID of 1. + +#### Login Evaluation Algorithm + +To determine whether a developer should be allowed to log in and which team they should be logged into, the portal uses the following algorithm: +User group mapping algorithm + +### Creating TIB Profile + +In the following sections you will learn how to configure the SSO profiles for admins and developers and map developers to the teams. + +You can configure the SSO profiles for admins in the Tyk Developer Portal application. Under **Settings** > **SSO Profiles** > **Add new SSO Profile**. + +There are two ways of creating SSO profiles: +1. **[Wizard Form](#using-the-wizard-form)**: Create a profile using the wizard guided form. +2. **[Raw JSON Editor](#using-the-json-raw-editor)**: Create a profile using JSON editor where you can specify your tib raw JSON profile. + +#### Using the Wizard Form + +You can access the wizard form by switching to the **Wizard** view. + + +Create a profile for admins: +1. Complete the **Profile action** step. Here you can choose a name (this name will generate the profile ID), the profile type (select **Profile for admin users**) and the redirect url on failure. +SSO Profiles Wizard +2. Select a supported **Provider type**. +SSO Profiles Wizard +3. Complete the **Profile configuration** step. Here you can specify the access to your idp. And Advanced settings if needed. +SSO Profiles Wizard +4. Don't add any group mapping since we are creating a profile for admins and will be ignored if added. Click on **Continue** to create the profile. +SSO Profiles Wizard + + + +Create a profile for developers: + +1. Complete the **Profile action** step. 
+ Here, you can choose a name (this name will generate the profile ID), the profile type (select **Profile for developers**), and the redirect URL on failure.
+
+ SSO Profiles Wizard
+
+2. Select a supported **Provider type**.
+
+ SSO Profiles Wizard
+
+3. Complete the **Profile configuration** step. Here you can specify the access to your IdP. And Advanced settings if needed.
+
+ SSO Profiles Wizard
+
+4. Add the group mapping for the developers. **Custom user group claim name** must equal the JWT claim name that refers to the user group in your IDP.
+
+ SSO Profiles Wizard
+
+5. Click on **Continue** to create the profile.
+
+
+
+#### Using the JSON Raw Editor
+
+The Tyk Identity Broker (TIB) uses [profiles](/api-management/external-service-integration#exploring-tib-profiles) to define details related to the identity provider such as its type and access credentials, and instructs TIB on how to treat users that try to log in with that provider.
+You can access the raw editor by switching to the **Raw editor** view, which displays a JSON editor with an empty TIB profile for guidance.
+SSO Profiles Raw Editor
+
+
+
+Create a profile for admins. Make sure the ActionType is equal to `GenerateOrLoginUserProfile` and the OrgID is equal to `"0"`.
+SSO Profiles Raw Editor
+
+In the above example, you need to specify the following parameters:
+- `OrgID` must be `"0"` for being accepted as a provider-admin
+- `ActionType` must be equal to `"GenerateOrLoginUserProfile"`
+- Replace the `host` and `port` in the fields `CallbackBaseURL`, `FailureRedirect` and `ReturnURL` with the actual host and port on which your portal instance is running. Also, replace `http` with `https` for the respective fields if you use https for your portal instance
+- Replace the `host` and `port` in the field `DiscoverURL` with the actual host and port on which your IDP instance is running. Also, replace `http` with `https` accordingly
+- In the `"ID"` field, specify an ID of this TIB profile.
+ You can select any value for this field that consists of digits, letters, and special signs, no spaces allowed. It is better to pick a human-readable ID for your profile for better maintainability of the configuration
+
+
+
+
+Create a developer profile. Ensure the ActionType is equal to `GenerateOrLoginDeveloperProfile` and, if you define a user group mapping, that the team/s exist in the portal.
+SSO Profiles Raw Editor
+
+In the above example, you need to specify the following parameters:
+- `OrgID` could be anything as its value is ignored;
+- `ActionType` must be equal to `"GenerateOrLoginDeveloperProfile"`
+- Replace the `host` and `port` in the fields `CallbackBaseURL`, `FailureRedirect` and `ReturnURL` with the actual host and port on which your portal instance is running. Also, replace `http` with `https` for the respective fields if you use HTTPS for your portal instance
+- Replace the `host` and `port` in the field `DiscoverURL` with the actual host and port on which your IDP instance is running. Also, replace `http` with `https` accordingly
+- In the `"ID"` field, specify an ID of this TIB profile. You can select any value for this field that consists of digits, letters, and special signs; no spaces are allowed. It is better to pick a human-readable ID for your profile for better maintainability of the configuration
+- `CustomUserGroupField` must be equal to the JWT claim name that refers to the user group in your IDP
+- `UserGroupMapping` is an object that defines the relationship between user groups in the IDP and teams in the portal. If not specified, the optional parameter will cause the portal to rely on the `DefaultUserGroupID` field to determine which team a developer should log in to.
Please refer to the [User group mapping section](#user-group-mapping ) for guidance +- `DefaultUserGroupID` is the default organization that the portal will use to determine which team a developer should be logged in to if it is not able to find a UserGroupMapping for that developer + + + +**Nuances of OIDC configuration** + +To ensure that the portal can log in a user with your OIDC Identity provider, you may need to either explicitly specify the email scopes in a profile +configuration or configure your IDP to include the email claim in the JWT. Failure to include the email scope in the JWT +would result in the portal not having access to the user's email. + +As an example, for Okta, you can use the following configuration: +```json +"UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "Scopes": ["openid", "email"], + "DiscoverURL": "{OIDC well-known endpoint}" + } +] +``` + + +Please refer to the [TIB configuration section](/api-management/external-service-integration#single-sign-on-sso) for step-by-step instructions for setting up the UseProviders section. + + + +### Testing SSO + +You can access the login URL in your SSO Profile details **Provider configuration** section. +SSO Profile Details + +Tyk Enterprise Developer Portal doesn't supply a login page for Single Sign-On out of the box, so you might need to create one. +Here is an example of such a page that works with a profile for the LDAP identity management system: +```.html + + + Tyk Developer portal login + + + Login to the Developer portal +
+ username:
+ password:
+ +
+ + +``` + + +Configuration on the portal side is quite straightforward. You need to specify the portal SSO API secret that acts as a credential for the APIs that are used by TIB for communication with the portal within Single Sign-On flow. +You can use any value for the portal SSO API secret, but it should be consistent with [TIB configuration](#configure-tyk-identity-broker-to-work-with-tyk-enterprise-developer-portal). + +To specify the portal SSO API secret, add the `PORTAL_API_SECRET` variable to [the portal .env file](/product-stack/tyk-enterprise-developer-portal/deploy/configuration#sample-env-file): +```.ini +PORTAL_API_SECRET=your-portal-api-secret +``` + +If you use [the Tyk helm chart](/tyk-self-managed/install#install-more-tyk-components), it is required to add the `PORTAL_API_SECRET` to extraEnvs: +```.yaml +extraEnvs: +- name: PORTAL_API_SECRET + value: "your-portal-api-secret" +``` + +## Configuring SSO with External TIB + +### Configure Tyk Identity Broker to work with Tyk Enterprise Developer Portal +The Tyk Enterprise Developer portal uses the [Tyk Identity Broker](/api-management/external-service-integration#what-is-tyk-identity-broker-tib) to work with various Identity Management Systems, such as LDAP, +Social OAuth (e.g., GPlus, Twitter, GitHub), or Basic Authentication providers. Therefore, to configure Single Sign-On for the portal, +you need to install and configure Tyk Identity Broker first. 
Follow these steps to achieve this: + +#### Install Tyk Identity Broker +Please refer to [the TIB installation guide documentation](/api-management/external-service-integration#install-standalone-tib) for different installation options: +- [Docker](https://hub.docker.com/r/tykio/tyk-identity-broker/#the-tibconf-file) +- [packages](https://packagecloud.io/tyk/tyk-identity-broker/install#bash-deb) +- [Tyk helm chart](/api-management/external-service-integration#install-standalone-tib) + +#### Specify TIB settings to work with the Tyk Enterprise Developer portal + +##### Docker or packages + +Create tib.conf file for [the Docker installation](https://hub.docker.com/r/tykio/tyk-identity-broker/#the-tibconf-file) or if you use [packages](https://packagecloud.io/tyk/tyk-identity-broker/install#bash-deb) to deploy TIB: +```.json +{ + "Secret":"test-secret", + "HttpServerOptions":{ + "UseSSL":false, + "CertFile":"./certs/server.pem", + "KeyFile":"./certs/server.key" + }, + "SSLInsecureSkipVerify": true, + "BackEnd": { + "Name": "in_memory", + "IdentityBackendSettings": { + "Hosts" : { + "localhost": "6379" + }, + "Password": "", + "Database": 0, + "EnableCluster": false, + "MaxIdle": 1000, + "MaxActive": 2000 + } + }, + "TykAPISettings":{ + "DashboardConfig":{ + "Endpoint":"https://{your portal host}", + "Port":"{your portal port}", + "AdminSecret":"{portal-api-secret}" + } + } +} +``` +Setting reference: +- **TykAPISettings.DashboardConfig.Endpoint** is the Developer portal url. Pay attention if any of the elements (TIB or Portal) is running on containers. +- **TykAPISettings.DashboardConfig.Port** is the Developer portal port. +- **TykAPISettings.DashboardConfig.AdminSecret** is `PortalAPISecret` in the configuration file of the Developer portal. + +The full reference for the configuration file is in [the TIB section of the documentation](/tyk-configuration-reference/tyk-identity-broker-configuration). 
+##### Helm charts +If you wish to deploy TIB in Kubernetes via [Tyk helm chart](/api-management/external-service-integration#install-standalone-tib), you need to specify TIB config as extraVars: +```.yaml +extraEnvs: + - name: TYK_IB_HTTPSERVEROPTIONS_CERTFILE + value: "./certs/server.pem" + - name: TYK_IB_HTTPSERVEROPTIONS_KEYFILE + value: "./certs/server.key" + - name: TYK_IB_SSLINSECURESKIPVERIFY + value: "true" + - name: TYK_IB_BACKEND_NAME + value: "in_memory" + - name: TYK_IB_BACKEND_IDENTITYBACKENDSETTINGS_HOSTS + value: "redis.tyk-cp:6379" + - name: TYK_IB_BACKEND_IDENTITYBACKENDSETTINGS_PASSWORD + value: "" + - name: TYK_IB_BACKEND_IDENTITYBACKENDSETTINGS_DATABASE + value: "0" + - name: TYK_IB_BACKEND_IDENTITYBACKENDSETTINGS_ENABLECLUSTER + value: "false" + - name: TYK_IB_BACKEND_IDENTITYBACKENDSETTINGS_MAXIDLE + value: "1000" + - name: TYK_IB_BACKEND_IDENTITYBACKENDSETTINGS_MAXACTIVE + value: "2000" + - name: TYK_IB_TYKAPISETTINGS_DASHBOARDCONFIG_ENDPOINT + value: "https://{your portal host}" + - name: TYK_IB_TYKAPISETTINGS_DASHBOARDCONFIG_PORT + value: "{your portal port}" + - name: TYK_IB_TYKAPISETTINGS_DASHBOARDCONFIG_ADMINSECRET + value: "{portal-api-secret}" +``` + +The full reference for the configuration file is in [the TIB section of the documentation](/tyk-configuration-reference/tyk-identity-broker-configuration). + +### Configure Single Sign-On for admin users and developers + +#### What is the Tyk Identity Broker profile +The Tyk Identity Broker (TIB) uses [profiles](/api-management/external-service-integration#exploring-tib-profiles) to define details related to the identity provider such as its type and access credentials, and instructs TIB on how to treat users that try to log in with that provider. +In this guide, you will create two TIB profiles for admin users and developers. This allows you to have different identity providers for admins and developers as well as for internal and external users. 
+ +Depending on your installation options for TIB, you need to specify profiles via a json file (for Docker or packages) or via a ConfigMap (for Tyk Helm Chart). + +##### profiles.json for Docker or packages installation +Here is an example of profiles.json file for Docker or packages installation: +```.json +[ + { + "ActionType": "GenerateOrLoginUserProfile", + "ID": "{ID of your TIB profile}", + "OrgID": "0", + "IdentityHandlerConfig": { + "DashboardCredential": "{portal API secret}" + }, + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB host}:{TIB port}", + "FailureRedirect": "http://{portal host}:{portal port}/?fail=true", + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "DiscoverURL": "OIDC well-known endpoint" + } + ] + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{portal host}:{portal port}/sso", + "Type": "redirect" + }, + { + "ActionType": "GenerateOrLoginDeveloperProfile", + "ID": "{ID of your TIB profile}", + "OrgID": "0", + "IdentityHandlerConfig": { + "DashboardCredential": "{portal API secret}" + }, + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB host}:{TIB port}", + "FailureRedirect": "http://{portal host}:{portal port}/?fail=true", + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "DiscoverURL": "OIDC well-known endpoint" + } + ] + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{portal host}:{portal port}/sso", + "Type": "redirect", + "DefaultUserGroupID": "1" + } +] +``` + +##### ConfigMap for Tyk Helm chart installation +Here is an example of ConfigMap for the Tyk Helm chart installation: +```.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: tyk-tib-profiles-conf +data: + profiles.json: | + [{ + "ActionType": "GenerateOrLoginUserProfile", + "ID": "{ID of your TIB profile}", + "OrgID": "0", + "IdentityHandlerConfig": { + "DashboardCredential": "{portal API secret}" + 
}, + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB host}:{TIB port}", + "FailureRedirect": "http://{portal host}:{portal port}/?fail=true", + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "DiscoverURL": "OIDC well-known endpoint" + } + ] + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{portal host}:{portal port}/sso", + "Type": "redirect" + }, + { + "ActionType": "GenerateOrLoginDeveloperProfile", + "ID": "{ID of your TIB profile}", + "OrgID": "0", + "IdentityHandlerConfig": { + "DashboardCredential": "{portal API secret}" + }, + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB host}:{TIB port}", + "FailureRedirect": "http://{portal host}:{portal port}/?fail=true", + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "DiscoverURL": "OIDC well-known endpoint" + } + ] + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{portal host}:{portal port}/sso", + "Type": "redirect", + "DefaultUserGroupID": "1" + }] +``` + +#### Configure Single Sign-On for admin users +The Tyk Enterprise Developer portal has two audiences: developers and admins. This section provides guidance on implementing +Single Sign-On for admin users. The configuration is rather straightforward, and you need to take these three steps +to enable Single Sign-On for admin users in your portal instance: +1. Create a profile for the Tyk Identity Broker (TIB) to work on your identity provider. 
Make sure the ActionType is equal to "GenerateOrLoginUserProfile", and OrgID is equal to "0": +```.json +[{ + "ActionType": "GenerateOrLoginUserProfile", + "ID": "{ID of your TIB profile}", + "OrgID": "0", + "IdentityHandlerConfig": { + "DashboardCredential": "{portal API secret}" + }, + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB host}:{TIB port}", + "FailureRedirect": "http://{portal host}:{portal port}/?fail=true", + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "DiscoverURL": "{OIDC well-known endpoint}" + } + ] + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{portal host}:{portal port}/sso", + "Type": "redirect" +}] +``` +In the above example, you need to specify the following parameters: +- `OrgID` must be `"0"` for being accepted as a provider-admin or super-admin +- `ActionType` must be equal to `"GenerateOrLoginUserProfile"` +- `IdentityHandlerConfig.DashboardCredential` must be equal to the `PortalAPISecret` field in the configuration file of the portal +- Replace `{portal host}` and `{portal port}` with the actual host and port on which your portal instance is running. Also, replace `http` with `https` for the respective fields if you use https for your portal instance +- Replace `{TIB host}` and `{TIB port}` with the actual host and port on which your TIB instance is running. Also, replace `http` with `https` for the respective fields if you use https for your TIB instance +- In the `"ID"` field, specify an ID of this TIB profile. You can select any value for this field that consists of digits, letters, and special signs, no spaces allowed. 
It is better to pick a human-readable ID for your profile for better maintainability of the configuration + + + + + **Nuances of OIDC configuration** + + To ensure that the portal can log in a user with your OIDC Identity provider, you may need to either explicitly specify the email scopes in a profile + configuration or configure your IDP to include the email claim in the JWT. Failure to include the email scope in the JWT + would result in the portal not having access to the user's email. + + As an example, for Okta, you can use the following configuration: + ```yaml + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "Scopes": ["openid", "email"], + "DiscoverURL": "{OIDC well-known endpoint}" + } + ] + ``` + + + +Please refer to the [TIB configuration section](/api-management/external-service-integration#single-sign-on-sso) for step-by-step instructions for setting up the UseProviders section. +Any changes to the TIB profile will be effective after restarting your TIB instance. + +2. Create a login page for admin users. We don't supply a login page for Single Sign-On out of the box, so you need to create one. +Here is an example of such page that works with a profile for LDAP identity management system: +```.html + + + Tyk Developer portal login + + + Login to the Developer portal +
+ username:
+ password:
+ +
+ + +``` +3. Now you should be able to log in to the portal with your identity provider as an admin user + +#### Configure Single Sign-On for developers +This section relates to configuration and settings required to set up Single Sign-On for developers. Configuration for developers is also straightforward. +However, for developers there is one additional step: user group mapping. + +##### User group mapping +In order to land a developer into the right API Consumer organization, it is necessary to configure the UserGroupMapping +in the TIB profile that creates a binding between user groups in your IDP and developer teams in the portal. + +User group mapping + +To define the user group mapping for your developer audience, you need to add the UserGroupMapping object to the corresponding TIB profile: +```yaml + "UserGroupMapping": { + "{IDP groupA ID}": "{portal teamA ID}", + "{IDP groupB ID}": "{portal teamB ID}", + ... + } +``` + +The `UserGroupMapping` object contains keys that refer to group IDs in your IDP, and the corresponding values are team IDs in the portal. +When the Tyk Identity Broker authorizes a user, it searches for a key that matches the user's group ID in the IDP. +If TIB can't find a matching group ID, it logs the user in to the team with an ID equal to `DefaultUserGroupID` in the portal (if `DefaultUserGroupID` is defined). +We recommend always defining `DefaultUserGroupID` and ensuring that it refers to a valid team ID in your portal instance. If `DefaultUserGroupID` is defined but refers to an invalid team ID, the portal will refuse login attempts. + +If no matching group ID is found in the `UserGroupMapping` object and `DefaultUserGroupID` isn't defined, the portal logs in the user to the "Default organization | All users" team with an ID of 1. 
+ +To determine whether a developer should be allowed to log in and which team they should be logged into, the portal uses the following algorithm: +User group mapping algorithm + + +##### Configure profile to enable Single Sign-On for developers +Follow these steps to enable Single Sign-On for developers: +1. Create a profile for the Tyk Identity Broker (TIB) to work on your identity provider. Make sure the ActionType is equal to "GenerateOrLoginDeveloperProfile": +```.json +[{ + "ActionType": "GenerateOrLoginDeveloperProfile", + "ID": "{ID of your TIB profile}", + "OrgID": "0", + "IdentityHandlerConfig": { + "DashboardCredential": "{portal API secret}" + }, + "ProviderConfig": { + "CallbackBaseURL": "http://{TIB host}:{TIB port}", + "FailureRedirect": "http://{portal host}:{portal port}/?fail=true", + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "DiscoverURL": "{OIDC well-known endpoint}" + } + ] + }, + "ProviderName": "SocialProvider", + "ReturnURL": "http://{portal host}:{portal port}/sso", + "Type": "redirect", + "CustomUserGroupField": "{your group ID field}", + "UserGroupMapping": { + "{IDP group ID}": "{portal team ID}" + }, + "DefaultUserGroupID": "{portal team ID}" +}] +``` +In the above example, you need to specify the following parameters: +- `OrgID` could be anything as its value is ignored; +- `ActionType` must be equal to `"GenerateOrLoginDeveloperProfile"` +- `IdentityHandlerConfig.DashboardCredential` must be equal to the `PortalAPISecret` field in the configuration file of the portal +- Replace `{portal host}` and `{portal port}` with the actual host and port on which your portal instance is running. Also, replace `http` with `https` for the respective fields if you use https for your portal instance +- Replace `{TIB host}` and `{TIB port}` with the actual host and port on which your TIB instance is running. 
Also, replace `http` with `https` for the respective fields if you use https for your TIB instance +- In the `"ID"` field, specify an ID of this TIB profile. You can select any value for this field that consists of digits, letters, and special signs, no spaces allowed. It is better to pick a human-readable ID for your profile for better maintainability of the configuration +- `CustomUserGroupField` must be equal to the JWT claim name that refers to the user group in your IDP +- `UserGroupMapping` an object that defines relationship between user groups in the IDP and teams in the portal. The optional parameter, if not specified, will cause the portal to rely on the `DefaultUserGroupID` field to determine which team a developer should log in to. Please refer to the [User group mapping section](#user-group-mapping ) for guidance +- `DefaultUserGroupID` is the default organization that the portal will use to determine which team a developer should be logged in to if it is not able to find a UserGroupMapping for that developer + + + + + **Nuances of OIDC configuration** + + To ensure that the portal can log in a user with your OIDC Identity provider, you may need to either explicitly specify the email scopes in a profile + configuration or configure your IDP to include the email claim in the JWT. Failure to include the email scope in the JWT + would result in the portal not having access to the user's email. + + As an example, for Okta, you can use the following configuration: + ```json + "UseProviders": [ + { + "Name": "openid-connect", + "Key": "{oAuth2.0 key}", + "Secret": "{oAuth2.0 secret}", + "Scopes": ["openid", "email"], + "DiscoverURL": "{OIDC well-known endpoint}" + } + ] + ``` + + + +2. Create a login page for developers. We don't supply a login page for Single Sign-On out of the box, so you need to create one. 
+Here is an example of such page that works with a profile for LDAP identity management system: +```.html + + + Tyk Developer portal login + + + Login to the Developer portal +
+ username:
+ password:
+ +
+ + +``` +3. Now you should be able to log in to the portal with your identity provider as a developer + diff --git a/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations.mdx b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations.mdx new file mode 100644 index 000000000..a59481e60 --- /dev/null +++ b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations.mdx @@ -0,0 +1,334 @@ +--- +title: "Organisations and Teams" +description: "How to manage Organisations in Tyk Developer Portal" +keywords: "Developer Portal, Tyk, API Consumer, Organisation, Organization, Team" +sidebarTitle: "Organisations and Teams" +--- + +## Introduction + +The Tyk Developer Portal uses Organisations and Teams to provide flexible, hierarchical access control for your API ecosystem. This structure allows you to manage API Consumers at both the organizational level and in smaller functional groups, reflecting real-world business relationships and access requirements. + +Unlike individual developer accounts, Organisations represent entire companies or business entities with sophisticated requirements: + +- **Team-based access:** Companies typically have multiple developers who need access to your APIs. Tyk Developer Portal's Organisation and Team structure ensures communication and access don't depend on a single individual who might leave the company. +- **Secure credential sharing**: Organizations need secure ways to share API credentials within their teams. Without proper tooling, developers resort to sharing credentials through insecure channels, creating security risks. +- **Hierarchical permissions**: Within organizations, some users need administrative capabilities while others require more limited access. The Tyk Developer Portal supports this through API Consumer Admin and Team Member roles. 
+- **Self-service team management**: Organizations can maintain their own teams by inviting new members or removing departed ones, reducing administrative overhead for API providers. + +This organizational approach allows you to manage API Consumers at both the company level and in smaller functional groups, supporting complex business relationships while maintaining security and governance. + +
+ + +**A note on spelling** + +Throughout this documentation, we use specific spelling conventions to help distinguish between product features and general concepts: +- Organisation (with an 's') refers specifically to the entity within the Tyk Developer Portal (sometimes abbreviated to Org) +- organization (with a 'z') refers to real-world businesses or the general concept of organizing + +This British/American English distinction helps clarify when we're discussing the Tyk Developer Portal feature versus general organizational concepts. + + + +### Understanding the Organizational Hierarchy + +Organisations and Teams create a two-level hierarchy that provides granular control over API access. This allows API Owners to manage access at multiple levels, supporting complex business relationships while maintaining security and governance. Note that users can belong to multiple Teams within an Organisation, allowing for flexible resource allocation based on project needs or job responsibilities. + +For example, consider a Partner (Acme Bank) that wishes to consume your APIs. They have an *Accounts* team that requires access to a specific set of APIs and a *Development* team that requires access to those plus additional APIs. 
+ +- You create an Organisation for the client (Acme Bank) +- You create separate Teams for their *Accounts* and *Development* users +- You construct two [Catalogs](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-catalogues) of [API Products](/portal/api-products) and [Plans](/portal/api-plans) + - Catalog 1 contains the Accounts APIs and subscription Plans + - Catalog 2 contains the Developer APIs and subscription Plans +- You configure Catalog visibility as follows: + - Catalog 1 is made visible to both Teams + - Catalog 2 is made visible only to the Developer Team +- You create an [API Consumer Admin](/portal/api-consumer#api-consumer-admin) user for each Team + - these users can invite colleagues into their Team as [API Consumer Team Member](/portal/api-consumer#team-member) users + +Diagram showing an Organisation with two Teams of API Consumers + +With this configuration, the Admin and Team Members in each team are unaware of the other Team or its members. The members of the Accounts team have access to discover and consume the API Products in Catalog 1, whilst the members of the Development team have access to both Catalogs. 
+ +### Default Organisation + +The system automatically creates a pre-configured "Default Organisation" during the [bootstrap](/portal/install#bootstrapping-enterprise-developer-portal) process that serves as the initial home for: + +- Self-registered users without an [invite code](/portal/api-consumer#invite-codes) +- API Consumer users created by API Owners without a specific Organisation assignment +- API Consumer users whose Organisation has been [deleted](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#deleting-organisations) + +While the Default Organisation cannot be deleted, you can: + +- [Rename](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#editing-organisation-details) it to better reflect your business needs +- Move users from it to other Organisations as needed +- Use it as a holding area for users awaiting proper Organisation assignment + +#### Developer App visibility + +[Team and Organisation level app visibility](/portal/developer-app#visibility) is not applied within the Default Organisation. This behavior has been implemented to prevent accidental exposure of Developer Apps if a user is removed from a custom Organisation and automatically reverts to the Default Org. + +
+ + +We do not recommend using the Default Org for publication of API Products and Plans. + + + +## Managing Organisations + +Organisations represent companies or business units that consume your APIs. + +- **Purpose**: Group related teams and developers under a single entity +- **Hierarchy**: Each API Consumer belongs to exactly one Organisation +- **Default Organisation**: A system-provided Organisation where users are placed if not assigned elsewhere +- **Creation**: Organisations can only be created by API Owners (or by self-registered developers if [Organisation requests](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#requesting-a-new-organisation) are enabled) +- **Management**: + - API Owners can create, modify, and delete any Organisation + - API Consumer Admins can manage users within their Organisation + +### Creating Organisations + +As an API Owner, you can create new Organisations to represent partner companies or business units: + +1. Navigate to **API Consumers > Organisations** in the Admin Portal +2. Select **Add new Organisation** + Click on Add to create a new Organisation +3. Provide a **Name** for the new Organisation + Giving the new Organisation a name +4. Select **Save changes** to create the Organisation + +Once created, you can begin adding Teams and users to the Organisation. Note that a [default Team](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#default-team) is automatically created with the Organisation. + +### Editing Organisation Details + +As an API Owner, you can change the name of an Organisation to represent changes in partner companies or business units: + +1. Navigate to **API Consumers > Organisations** in the Admin Portal +2. Select the Organisation you want to rename +3. Update the **Name** +4. 
Select **Save changes** + +### Deleting Organisations + +As an API Owner, you can delete an Organisation to represent changes in partner companies or business units: + +1. Navigate to **API Consumers > Organisations** in the Admin Portal +2. Select the three dot menu next to the Organisation you want to delete +3. Select **Delete** +4. Confirm the deletion + +The Organisation and any Teams created within it will be deleted immediately. + +All users (both API Consumer Admins and Team Members) will be moved to the [default Team](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#default-team) in the [default Organisation](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#default-organisation) where any Developer Apps they own will have their visibility set to [Personal](/portal/developer-app#visibility) + +### Best Practices for Organisation Management + +- **Naming conventions**: Establish a consistent naming pattern for Organisations +- **Regular audits**: Periodically review Organisation membership and activity +- **Documentation**: Maintain records of which real-world entities each Organisation represents +- **Onboarding process**: Create a standardized workflow for adding new Organisations + +## Working With Teams + +Teams are groups of developers who collaborate on related projects. 
+ +- **Purpose**: Enable collaboration and shared access to API resources +- **Hierarchy**: + - Teams exist within a specific Organisation + - API Consumers can belong to multiple Teams within their Organisation +- **Default Team**: Each Organisation has a default Team where users are placed if not assigned to any other team +- **Creation**: Teams can only be created by API Owners +- **Management**: + - **API Owners** can create, modify, and delete any Team + - **API Consumer Admins** can manage team membership within their Organisation + +### Creating Teams + +Teams allow you to organize API Consumers into functional groups with specific API access: + +1. As an API Owner, navigate to **API Consumers > Teams** in the Admin Portal +2. Select **Add new Team** +3. Complete the team details: + - **Name**: A descriptive name for the team (required) + - **Organisation**: Select the Organisation this team belongs to +4. Select **Save changes** + +Teams can represent departments, project groups, or any logical grouping that helps organize API access within an Organisation. + +### Managing Team Membership + +Once a team is created, an API Owner can add members from the Organisation containing the Team: + +1. Navigate to **API Consumers > Users** in the Admin Portal +2. Find and select the user you wish to add or remove +3. If they are not in the Organisation containing the Team, change their **Organisation** +4. Modify their Team membership in the **Teams** section +5. Select **Save changes** + +An API Consumer Admin can configure the Team membership of other API Consumer users that share any Teams with the Admin as described [here](/portal/api-consumer#managing-api-consumer-users-in-the-live-portal). This self-service capability allows Organisations to manage their own structure while API Owners maintain control over API access. + +Remember that users can belong to multiple teams, gaining access to all API Catalogs assigned to any of their teams. 
+ +### Default Team + +Each Organisation has a system-generated Default Team that serves several important purposes: + +- Provides an initial home for new users in the Organisation +- Provides a home for API Consumer users who have been [removed](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#deleting-organisations) from all other Teams in the Org +- Can be used for Organisation-wide API access + +The Default Team cannot be deleted, however you can: + +- Rename it to better reflect your business needs +- Move users from it to other Teams as needed +- Use it as a holding area for users awaiting proper Team assignment + +#### Developer App visibility + +[Team level app visibility](/portal/developer-app#visibility) is not applied within the Default Team. This behavior has been implemented to prevent accidental exposure of Developer Apps if a user is removed from a team and automatically reverts to the Default Team. + +
+ + +We do not recommend using the Default Team for consumption of API Products and Plans except for Organisation-wide API access. + + + +### Best Practices for Team Management + +- **Logical grouping**: Create teams based on project needs or functional areas +- **Minimal access**: Assign only the APIs each team needs to function +- **Regular audits**: Periodically review team membership and API access +- **Descriptive naming**: Use clear, consistent naming conventions for teams +- **Documentation**: Maintain records of each team's purpose and required access + + +## Requesting a New Organisation + +The Developer Portal allows potential API Consumers to request the creation of a new Organisation during self-registration. This powerful feature balances self-service convenience with administrative control, addressing several key business needs: + +- When running an open API program that welcomes new business partners +- When scaling your API ecosystem to reach more companies without proportionally increasing administrative work +- When you want to capture interest from potential partners outside normal business hours +- When you need clear differentiation between individual developers and those representing companies + +The Organisation request feature adds value with: + +- Accelerated Onboarding: Reduces the time from initial interest to active API usage by eliminating manual Organisation creation steps +- Business Intelligence: Provides visibility into which companies are interested in your APIs, creating potential partnership opportunities +- Improved User Experience: Allows users to properly identify themselves as representing a company from the start +- Proper Governance: Maintains security through approval workflows while enabling self-service + +This self-service approach reduces administrative overhead while ensuring proper governance of your API ecosystem. It's particularly valuable for open API programs or when expanding your API consumer base. 
+ +### Requesting a new Organisation + +1. Visit the Developer Portal and [register] without an Invite Code +2. Log in to the Developer Portal (this can be done without the account having been approved) +3. Select **Create an Organisation** + Request a new Organisation +4. Provide the requested Org with a **Name** + Specify name of the Organisation +5. Select **Create Organisation** +6. The user receives confirmation that their request is pending review + Organisation registration is pending +7. Note that if the Developer Portal settings are configured for [automatic approval](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#configuring-organisation-request-settings) of Organisation Requests without API Owner [review](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#reviewing-organisation-requests) then the Organisation will be created immediately and the requestor approved and converted to an API Consumer Admin within the new Org. + Organisation registration is approved + +### Reviewing Organisation Requests + +1. If [automatic approval](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-api-consumer-organisations#configuring-organisation-request-settings) of Organisation Requests is not set, the API Owner users will be notified of Organisation request via email. + New Organisation registration request notification +2. Navigate to **API Consumers > Organisations** in the Admin Portal +2. The requested Organisation appears as *pending* in the list +3. Select the pending Organisation to see which user made the request +4. 
After reviewing the request, an API Owner can use the options in the three dot menu to: + - Approve the request, activating the new Organisation with the requestor automatically becoming an API Consumer Admin + - Reject the request, with the requestor remaining a Team Member + New Organisation registration request view +5. The requestor will receive an email notifying them of the approval or rejection of the request. + +**Note**: The API Owner can modify the name of the new Org during the review, if required. + +The content of the emails sent to API Owners and API Consumers can be [customized](/portal/customization/email-notifications) to meet your business needs. + +### Configuring Organisation Request Settings + +Control whether and how users can request new Organisations by configuring the Developer Portal settings: + +1. Navigate to **Settings > General > API Consumer access** in the Admin Portal + Organisation registration settings +2. Check or clear the options: + - **Enable API consumers to register Organisations** + - **Auto-approve API consumers registering organisation** +3. Select **Save changes** + +Note that enabling auto-approval will mean there is no opportunity to review Org requests, so it should only be used in carefully controlled business environments. + + +## Use Cases and Implementation Strategies + +The Organisation and Team structure in Tyk Developer Portal can be adapted to support various business models and API programs. 
Here are strategic approaches for common scenarios: + +### Enterprise Partner Ecosystem + +**Scenario**: Managing APIs for a network of business partners with different access needs + +**Implementation**: + - Create an Organisation for each partner company + - Structure teams based on partner's functional departments (e.g., Development, QA, Analytics) + - Assign graduated API access tiers based on partnership level + - Designate partner technical leads as API Consumer Admins + +**Benefits**: + - Clear separation between different partner companies + - Partners can self-manage their internal team structure + - Access revocation is simplified when partnerships change + - Usage analytics can be tracked at the partner company level + +### Internal Developer Program + +**Scenario**: Providing API access across departments within your own company + +**Implementation**: +- Create Organisations representing major business units or subsidiaries +- Form teams based on projects, product lines, or functional groups +- Use the Default Organisation for central IT or platform teams +- Implement consistent naming conventions that align with internal structure + +**Benefits**: +- Mirrors existing company hierarchy for easier governance +- Supports chargeback models for internal API consumption +- Enables department-specific policies and quotas +- Provides visibility into cross-departmental API usage + +### Public API Marketplace + +**Scenario**: Offering APIs to external developers with tiered access models + +**Implementation**: +- Enable Organisation self-registration requests +- Create template teams for common access patterns (Basic, Professional, Enterprise) +- Implement automated workflows for upgrading access tiers +- Use Default Teams for individual developers without complex needs + +**Benefits**: +- Scales efficiently as your developer community grows +- Supports freemium to premium conversion paths +- Allows companies to start small and expand access as needed +- 
Provides clear separation between individual developers and companies + +### Implementation Checklist + +Regardless of your use case, consider these factors when designing your Organisation and Team structure: +- **Scalability**: Will the structure accommodate growth in users and APIs? +- **Governance**: Does it support your compliance and security requirements? +- **Administration**: Is the overhead manageable for your API team? +- **User Experience**: Does it make sense from the API Consumer perspective? +- **Analytics**: Will you get the usage insights needed for your business? +- **Flexibility**: Can it adapt as your API program evolves? + +By thoughtfully designing your Organisation and Team structure to match your specific business needs, you can create an API program that balances security, usability, and administrative efficiency. diff --git a/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-catalogues.mdx b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-catalogues.mdx new file mode 100644 index 000000000..9097d541c --- /dev/null +++ b/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-catalogues.mdx @@ -0,0 +1,122 @@ +--- +title: "API Catalogs" +description: "Working with API Catalogs" +keywords: "Developer Portal, Tyk, Managing Access, Catalogs" +sidebarTitle: "API Catalogs" +--- + +## Introduction + +API Catalogs are curated collections of API Products and Plans that enable you to organize and present your API offerings to different developer audiences. Catalogs serve as the primary navigation and discovery mechanism in the Tyk Developer Portal, allowing you to create tailored API marketplaces for different consumer segments. + +Unlike traditional API documentation sites that present all APIs to everyone, Catalogs give you fine-grained control over who sees what. 
This enables you to create personalized experiences for different developer audiences - from public APIs available to anyone, to specialized offerings for specific partners or internal teams. + +Catalogs transform your API portfolio management by: + +- Segmenting API Products for different developer audiences +- Creating customized discovery experiences for different use cases +- Controlling visibility of API offerings based on business relationships +- Enabling consistent organization of related API Products + +In the Tyk Developer Portal, Catalogs act as the bridge between your API Products and your developer community, ensuring that each developer sees exactly the APIs they need. + +## Key Concepts + +### Catalog Types + +The Tyk Developer Portal supports two visibility modes for Catalogs: + +- Public Catalogs: Visible to anyone visiting your Developer Portal, even without logging in. Ideal for openly available APIs and developer recruitment. +- Private Catalogs: Visible only to authenticated users who have logged into your Developer Portal. They can be further restricted only to members of specific [teams](/portal/api-consumer). Perfect for partner-specific APIs, internal teams, or premium offerings. 
+ +### Catalog Structure + +Each Catalog contains: + +- [API Products](/portal/api-products): The functional API offerings available in this Catalog +- [Plans](/portal/api-plans): The subscription options available for Products in this Catalog +- Visibility Settings: Controls which developers can see this Catalog +- Presentation Elements: Name, description, and other display properties + +### Catalog Relationships + +Understanding how Catalogs relate to other elements in the Developer Portal: + +- Products and Plans: A Product or Plan can appear in multiple Catalogs +- Teams and Organisations: Can be granted access to specific Custom Catalogs +- Developer Experience: Developers only see Catalogs they have access to + +## API Catalog Reference Guide + +This comprehensive reference guide details all the configurable options and features of API Catalogs in the Tyk Developer Portal. + +### Core Features + +#### Catalog Name + +The primary identifier for your Catalog within the Admin Portal, this is not exposed in the Live Portal + +- **Location**: *Catalogues > Add/Edit Catalogues > Name* +- **Purpose**: Identifies the Catalog within the Developer Portal +- **Best Practice**: Choose a clear, descriptive name that reflects the Catalog's purpose or audience + +#### Path URL + +This configuration is not currently in use and can be ignored. + +#### Sync URL with Name + +- **Location**: *Catalogues > Add/Edit Catalogues > Sync URL with Name* +- **Note**: This configuration must be checked (selected). + +### Catalog Visibility + +#### Visibility Options + +Controls which API Consumers can see and access this Catalog. 
 + +- **Location**: *Catalogues > Add/Edit Catalogues > Visibility options* +- **Options**: + - Public: Visible to all visitors, even without logging in + - Private: Visible only to authenticated users in the teams selected in the [Audience](/tyk-stack/tyk-developer-portal/enterprise-developer-portal/managing-access/manage-catalogues#audience) +- **Default**: Private +- **Best Practice**: Use the most restrictive visibility that meets your business needs + +#### Audience + +Specifies which teams can access a Private Catalog. + +- **Location**: *Catalogues > Add/Edit Catalogues > Team* +- **Selection**: Select **Add Team** then choose from any Teams created on the Developer Portal; you can add multiple teams by repeating this action +- **Behavior**: Only members of the selected teams will see this Catalog +- **Note**: Teams must be created before they can be added to the audience; any combination of Teams can be added to a Catalog's audience across any number of Organisations + +### Catalog Content + +#### Products + +Determines which API Products appear in this Catalog. + +- **Location**: *Catalogues > Add/Edit Catalogues > Products* +- **Selection**: Select one or more Products from the dropdown +- **Removal**: Click on the `x` next to the name of the Product you want to delete from the Catalog +- **Relationship**: A Product can be assigned to multiple Catalogs +- **Best Practice**: Ensure that Products and their relevant Plans are assigned to the same Catalogs + +#### Plans + +Determines which API Plans appear in this Catalog. 
+ +- **Location**: *Catalogues > Add/Edit Catalogues > Plans* +- **Selection**: Select one or more Plans from the dropdown +- **Removal**: Click on the `x` next to the name of the Plan you want to delete from the Catalog +- **Relationship**: A Plan can be assigned to multiple Catalogs +- **Best Practice**: Ensure that Products and their relevant Plans are assigned to the same Catalogs + +## Best Practices for API Catalogs + +- Create purpose-driven Catalogs: Design each Catalog with a specific audience and purpose in mind +- Use clear naming conventions: Make Catalog names intuitive and descriptive +- Maintain consistent organization: Apply similar structures across Catalogs for a predictable developer experience +- Limit the number of Catalogs: Too many Catalogs can create confusion; aim for a manageable number +- Review access regularly: Periodically audit Custom Catalog access to ensure it remains appropriate diff --git a/tyk-stack/tyk-gateway/important-prerequisites.mdx b/tyk-stack/tyk-gateway/important-prerequisites.mdx new file mode 100644 index 000000000..352438cee --- /dev/null +++ b/tyk-stack/tyk-gateway/important-prerequisites.mdx @@ -0,0 +1,55 @@ +--- +title: "Useful Configurations when Getting started" +description: "Important prerequisites and configurations needed before proceeding with Tyk tutorials." +sidebarTitle: "Useful Configurations" +--- + +These are some common settings that you need before proceeding with other parts of our tutorials. + +## Tyk Config + +### Path to Tyk API Definitions configurations directory + +You may need to explicitly define the path in your Tyk config to the directory where you will add +the API definitions for Tyk to serve. + +```yaml +... +"app_path": "/opt/tyk-gateway/apps", +... +``` + +### Path to Policies file + +You need to explicitly set the path to your Policies JSON file in your Tyk config. + +```yaml +... + "policies": { + "policy_source": "file", + "policy_record_name": "policies/policies.json" + }, +... 
+``` + +### Remove Tyk Dashboard related config options + +Some config options for the Community Edition are not compatible with the Dashboard +version, which requires a license. So, **remove** any section in your Tyk config which +starts with: + +```yaml +... +"db_app_conf_options": { + ... +}, +... +``` + +## Hot reload is critical in Tyk CE + +Each time you add an API definition in Tyk CE, you need to make a hot reload API call as follows: + +```curl +curl -H "x-tyk-authorization: {your-secret}" -s https://{your-tyk-host}:{port}/tyk/reload/group | python -mjson.tool +``` diff --git a/tyk-stack/tyk-operator/create-an-api.mdx b/tyk-stack/tyk-operator/create-an-api.mdx new file mode 100644 index 000000000..b1dc97c45 --- /dev/null +++ b/tyk-stack/tyk-operator/create-an-api.mdx @@ -0,0 +1,2024 @@ +--- +title: "Create and Secure an API with Tyk Operator" +description: "Learn how to create an API using Tyk Operator in Kubernetes" +keywords: "Tyk Operator, Kubernetes, API Management" +sidebarTitle: "Create an API" +--- + +## Introduction + +Tyk Operator allows you to manage your Tyk APIs, policies, and other configurations using Kubernetes Custom Resource Definitions (CRDs). This page will help you create an API using Tyk Operator. + +## Set Up Tyk OAS API +Setting up OpenAPI Specification (OAS) APIs with Tyk involves preparing an OAS-compliant API definition and configuring it within your Kubernetes cluster using Tyk Operator. This process allows you to streamline API management by storing the OAS definition in a Kubernetes ConfigMap and linking it to Tyk Gateway through a TykOasApiDefinition resource. + +### Create your Tyk OAS API +#### Prepare the Tyk OAS API Definition +First, you need to have a complete Tyk OAS API definition file ready. This file will contain all the necessary configuration details for your API in OpenAPI Specification (OAS) format. + +Here is an example of what the Tyk OAS API definition might look like. 
Note that Tyk extension `x-tyk-api-gateway` section should be present. + +```json {hl_lines=["9-25"],linenos=true} +{ + "info": { + "title": "Petstore", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } +} +``` + +Save this API definition file (e.g., `oas-api-definition.json`) locally. + + + +**Tips** + +You can create and configure your API easily using Tyk Dashboard in a developer environment, and then obtain the Tyk OAS API definition following these instructions: + +1. Open the Tyk Dashboard +2. Navigate to the API you want to manage with the Tyk Operator +3. Click on the "Actions" menu button and select "View API Definition." +4. This will display the raw Tyk OAS API definition of your API, which you can then copy and save locally. + + + +#### Create a ConfigMap for the Tyk OAS API Definition + +You need to create a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/#configmap-object) in Kubernetes to store your Tyk OAS API definition. The Tyk Operator will reference this ConfigMap to retrieve the API configuration. + +To create the ConfigMap, run the following command: + +```sh +kubectl create configmap tyk-oas-api-config --from-file=oas-api-definition.json -n tyk +``` + +This command creates a ConfigMap named `tyk-oas-api-config` in the `tyk` namespace (replace `tyk` with your actual namespace if different). + + + +**Notes** + +There is inherent size limit to a ConfigMap. The data stored in a ConfigMap cannot exceed 1 MiB. In case your OpenAPI document exceeds this size limit, it is recommended to split your API into smaller sub-APIs for easy management. 
For details, please consult [Best Practices for Describing Large APIs](https://learn.openapis.org/best-practices.html#describing-large-apis) from the OpenAPI initiative. + + + + + +**Notes** + +If you prefer to create ConfigMap with a manifest using `kubectl apply` command, you may get an error that the annotation metadata cannot exceed 256KB. It is because by using `kubectl apply`, `kubectl` automatically saves the whole configuration in the annotation [kubectl.kubernetes.io/last-applied-configuration](https://kubernetes.io/docs/reference/labels-annotations-taints/#kubectl-kubernetes-io-last-applied-configuration) for tracking changes. Your Tyk OAS API Definition may easily exceed the size limit of annotations (256KB). Therefore, `kubectl create` is used here to get around the problem. + + + +#### Create a TykOasApiDefinition Custom Resource + +Now, create a `TykOasApiDefinition` resource to tell the Tyk Operator to use the Tyk OAS API definition stored in the ConfigMap. + +Create a manifest file named `tyk-oas-api-definition.yaml` with the following content: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: petstore +spec: + tykOAS: + configmapRef: + name: tyk-oas-api-config # Metadata name of the ConfigMap resource that stores the Tyk OAS API Definition + namespace: tyk # Metadata namespace of the ConfigMap resource + keyName: oas-api-definition.json # Key for retrieving Tyk OAS API Definition from the ConfigMap +``` + +#### Apply the TykOasApiDefinition Manifest + +Use `kubectl` to apply the `TykOasApiDefinition` manifest to your cluster: + +```sh +kubectl apply -f tyk-oas-api-definition.yaml +``` + +This command creates a new `TykOasApiDefinition` resource in your cluster. The Tyk Operator will watch for this resource and configures Tyk Gateway or Tyk Dashboard with a new API using the provided Tyk OAS API definition. 
 + +#### Verify the Tyk OAS API Creation + +To verify that the API has been successfully created, check the status of the TykOasApiDefinition resource: + +```sh +kubectl get tykoasapidefinition petstore +``` + +You should see the status of the resource, which will indicate if the API creation was successful. + +```bash +NAME DOMAIN LISTENPATH PROXY.TARGETURL ENABLED SYNCSTATUS INGRESSTEMPLATE +petstore /petstore/ https://petstore.swagger.io/v2 true Successful +``` + +#### Test the Tyk OAS API +After the Tyk OAS API has been successfully created, you can test it by sending a request to the API endpoint defined in your OAS file. + +For example, if your API endpoint is `/store/inventory`, you can use `curl` or any API client to test it: + +```sh +curl "TYK_GATEWAY_URL/petstore/store/inventory" +``` + +Replace TYK_GATEWAY_URL with the URL of your Tyk Gateway. + +#### Manage and Update the Tyk OAS API +To make any changes to your API configuration, update the OAS file in your ConfigMap and then re-apply the ConfigMap using `kubectl replace`: + +```sh +kubectl create configmap tyk-oas-api-config --from-file=oas-api-definition.json -n tyk --dry-run=client -o yaml | kubectl replace -f - +``` + +The Tyk Operator will automatically detect the change and update the API in the Tyk Gateway. + + + +**Notes** + +`kubectl replace` without `--save-config` option is used here instead of `kubectl apply` because we do not want to save the Tyk OAS API definition in its annotation. If you want to enable `--save-config` option or use `kubectl apply`, the Tyk OAS API definition size would be further limited to at most 262144 bytes. + + + +#### Tyk OAS API Example +This example shows the minimum resources and fields required to define a Tyk OAS API using Tyk Operator. 
+ +```yaml{hl_lines=["7-7", "41-44"],linenos=true} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm + namespace: default +data: + test_oas.json: |- + { + "info": { + "title": "Petstore", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": {}, + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } + } +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: petstore +spec: + tykOAS: + configmapRef: + name: cm + namespace: default + keyName: test_oas.json +``` + +Here, a `ConfigMap` is created that contains the Tyk OAS API Definition with the `data` field with key `test_oas.json`. This is linked to from a `TykOasApiDefinition` resource via `spec.tykOAS.configmapRef`. + +To apply it, simply save the manifest into a file (e.g., `tyk-oas-api.yaml`) and use `kubectl apply -f tyk-oas-api.yaml` to create the required resources in your Kubernetes cluster. This command will create the necessary ConfigMap and TykOasApiDefinition resources in the `default` namespace. + + + +### Secure your Tyk OAS API +#### Update your Tyk OAS API Definition + +First, you'll modify your existing Tyk OAS API Definition to include the API key authentication configuration. + +When creating the Tyk OAS API, you stored your OAS definition in a file named `oas-api-definition.json` and created a ConfigMap named `tyk-oas-api-config` in the `tyk` namespace. + +Modify your Tyk OAS API Definition `oas-api-definition.json` as follow. 
+ +```json {hl_lines=["8-14","16-20","33-40"],linenos=true} +{ + "info": { + "title": "Petstore protected", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "components": { + "securitySchemes": { + "petstore_auth": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + } + }, + "security": [ + { + "petstore_auth": [] + } + ], + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Petstore", + "state": { + "active": true + } + }, + "upstream": { + "url": "https://petstore.swagger.io/v2" + }, + "server": { + "authentication": { + "enabled": true, + "securitySchemes": { + "petstore_auth": { + "enabled": true + } + } + }, + "listenPath": { + "value": "/petstore/", + "strip": true + } + } + } +} +``` + +In this example, we added the following sections to configure key authentication for this API. + +- `components.securitySchemes` defines the authentication method (in this case, `apiKey` in the header). +- `security`: Applies the authentication globally to all endpoints. +- `x-tyk-api-gateway.server.authentication`: Tyk-specific extension to enable the authentication scheme. + +You can configure your API for any Tyk supported authentication method by following the [Client Authentication](/api-management/client-authentication) documentation. + +Save your updated API definition in the same file, `oas-api-definition.json`. + +#### Update the ConfigMap with the new Tyk OAS API Definition + +Update the existing ConfigMap that contains your Tyk OAS API Definition with the following command: + +```sh +kubectl create configmap tyk-oas-api-config --from-file=oas-api-definition.json -n tyk --dry-run=client -o yaml | kubectl replace -f - +``` + +This command updates the existing ConfigMap named `tyk-oas-api-config` in the `tyk` namespace (replace `tyk` with your actual namespace if different) with the new Tyk OAS API Definition stored in `oas-api-definition.json`. 
 + +Since a `TykOasApiDefinition` resource has been created with reference to this ConfigMap in the previous tutorial: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykOasApiDefinition +metadata: + name: petstore +spec: + tykOAS: + configmapRef: + name: tyk-oas-api-config # Metadata name of the ConfigMap resource that stores the Tyk OAS API Definition + namespace: tyk # Metadata namespace of the ConfigMap resource + keyName: oas-api-definition.json # Key for retrieving Tyk OAS API Definition from the ConfigMap +``` + +Any changes in the ConfigMap would be detected by Tyk Operator. Tyk Operator will then automatically reconcile the changes and update the API configuration at Tyk. + +#### Verify the changes + +Verify that the `TykOasApiDefinition` has been updated successfully: + +```sh +kubectl get tykoasapidefinition petstore -o yaml +``` + +Look for the `latestTransaction` field in `status`: + +```yaml +status: + latestTransaction: + status: Successful + time: "2024-09-16T11:48:20Z" +``` + +The **Successful** status shows that Tyk Operator has reconciled the API with Tyk successfully. The last update time is shown in the `time` field. + +#### Test the API Endpoint +Now, test your API endpoint to confirm that it requires an API key. + +For example, if your API endpoint is `/store/inventory`, you can use `curl` or any API client to test it: + +```sh +curl -v "TYK_GATEWAY_URL/petstore/store/inventory" +``` + +Replace TYK_GATEWAY_URL with the URL of your Tyk Gateway. + +The request should fail with a `401 Unauthorized` response now as an API key is required for access. Your API has been secured by Tyk Gateway. + +## Set Up Tyk Classic API + +### Create a Tyk Classic API +First, specify the details of your API using the [ApiDefinition CRD](/api-management/automations/operator#apidefinition-crd), then deploy it to create the corresponding Kubernetes resource. Tyk Operator will take control of the CRD and create the actual API in the Tyk data plane. 
+ +#### Create an ApiDefinition resource in YAML format +Create a file called `httpbin.yaml`, then add the following: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +You can also use other sample files from the following pages: + +- [HTTP Proxy example](/tyk-stack/tyk-operator/create-an-api#set-up-manifest-for-http) +- [TCP Proxy example](#set-up-manifest-for-tcp) +- [GraphQL Proxy example](#set-up-manifest-for-graphql) +- [UDG example](#set-up-manifest-for-udg) + +#### Deploy the ApiDefinition resource +We are going to create an ApiDefinition from the httpbin.yaml file, by running the following command: + +```console +$ kubectl apply -f httpbin.yaml +``` + +Or, if you don’t have the manifest with you, you can run the following command: + +```yaml +cat <.`.svc.cluster.local DNS entry once they are created. +For example, if you have a service called `httpbin` in `default` namespace, you can contact `httpbin` service with `httpbin.default.svc` DNS record in the cluster, instead of IP addresses. +Please visit the official [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) for more details. +Suppose you want to create a Deployment of [httpbin](https://hub.docker.com/r/kennethreitz/httpbin/) service using [ci/upstreams/httpbin.yaml](https://github.com/TykTechnologies/tyk-operator/blob/master/ci/upstreams/httpbin.yaml) file. You are going to expose the application through port `8000` as described under the Service [specification](https://github.com/TykTechnologies/tyk-operator/blob/master/ci/upstreams/httpbin.yaml#L10). 
+You can create Service and Deployment by either applying the manifest defined in our repository: + +```console +$ kubectl apply -f ci/upstreams/httpbin.yaml +``` + +Or, if you don’t have the manifest with you, you can run the following command: + +```yaml +cat <` namespace as follows: + +```console +$ kubectl get service -n +``` + +You can update your `httpbin` as follows: + +```yaml +cat <..svc:`). +Now, if you send your request to the `/httpbin` endpoint of the Tyk Gateway, the request will be proxied to the `httpbin Service`: + +```curl +curl -sS http://localhost:8080/httpbin/headers +{ + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Host": "httpbin.default.svc:8000", + "User-Agent": "curl/7.68.0" + } +} +``` + +As you can see from the response, the host that your request should be proxied to is `httpbin.default.svc:8000`. + +### Secure your Classic API +#### Update your API to Require a Key + +You might already have realized that our `httpbin` API is keyless. If you check the APIDefinition's specification, the `use_keyless` field is set to `true`. +Tyk keyless access represents completely open access for your API and causes Tyk to bypass any session-based middleware (middleware that requires access to token-related metadata). Keyless access will enable all requests through. +You can disable keyless access by setting `use_keyless` to false. + +1. Update your `httpbin.yaml` file + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: false + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +2. 
Apply the changes + +```bash +kubectl apply -f httpbin.yaml +``` + +Or, if you don’t have the manifest with you, you can run the following command: + +```yaml +cat < +Tyk Operator supported authentication types are listed in the [API Definition features](/api-management/automations/operator#apidefinition-crd) section. + + + +#### Create an API key + +You need to generate a key to access the `httpbin` API now. Follow [this guide](/getting-started/configure-first-api#create-an-api-key) to see how to create an API key for your installation. + +You can obtain the API name and API ID of our example `httpbin` API by following command: + +```yaml +kubectl describe tykapis httpbin +Name: httpbin +Namespace: default +Labels: +Annotations: +API Version: tyk.tyk.io/v1alpha1 +Kind: ApiDefinition +Metadata: + ... +Spec: + ... + Name: httpbin + ... +Status: + api_id: ZGVmYXVsdC9odHRwYmlu +Events: +``` + +You can obtain the API name and API ID from `name` and `status.api_id` field. + +In our example, it is as follows: + +- API-NAME: httpbin +- API-ID: ZGVmYXVsdC9odHRwYmlu + +When you have successfully created a key, you can use it to access the `httpbin` API. + +```curl +curl -H "Authorization: Bearer {Key ID}" localhost:8080/httpbin/get +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Authorization": "Bearer {Key ID}", + "Host": "httpbin.org", + "User-Agent": "curl/7.77.0", + "X-Amzn-Trace-Id": "Root=1-6221de2a-01aa10dd56f6f13f420ba313" + }, + "origin": "127.0.0.1, 176.42.143.200", + "url": "http://httpbin.org/get" +} +``` +Since you have provided a valid key along with your request, you do not get a `HTTP 401 Unauthorized` response. + + +### Set Up Tyk Classic API Authentication +Client to Gateway Authentication in Tyk ensures secure communication between clients and the Tyk Gateway. Tyk supports various authentication methods to authenticate and authorize clients before they can access your APIs. 
These methods include API keys, Static Bearer Tokens, JWT, mTLS, Basic Authentication, and more. This document provides example manifests for each authentication method supported by Tyk. + +#### Keyless (Open) + +This configuration allows [keyless (open)](/basic-config-and-security/security/authentication-authorization/open-keyless) access to the API without any authentication. + +```yaml {hl_lines=["7-7"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-keyless +spec: + name: httpbin-keyless + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +#### Auth Token (Bearer Token) + +This setup requires a [bearer token](/api-management/authentication/bearer-token) for access. + +In the below example, the authentication token is set by default to the `Authorization` header of the request. You can customize this behavior by configuring the following fields: + +- `use_cookie`: Set to true to use a cookie value for the token. +- `cookie_name`: Specify the name of the cookie if use_cookie is enabled. +- `use_param`: Set to true to allow the token to be passed as a query parameter. +- `param_name`: Specify the parameter name if use_param is enabled. +- `use_certificate`: Enable client certificate. This allows you to create dynamic keys based on certificates. +- `validate_signature`: Enable [signature validation](/api-management/authentication/bearer-token#auth-token-with-signature). 
+ +```yaml {hl_lines=["13-35"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-auth-token +spec: + name: httpbin-auth-token + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + use_standard_auth: true + auth_configs: + authToken: + # Auth Key Header Name + auth_header_name: Authorization + # Use cookie value + use_cookie: false + # Cookie name + cookie_name: "" + # Allow query parameter as well as header + use_param: false + # Parameter name + param_name: "" + # Enable client certificate + use_certificate: false + # Enable Signature validation + validate_signature: false + signature: + algorithm: "" + header: "" + secret: "" + allowed_clock_skew: 0 + error_code: 0 +``` + +#### JWT + +This configuration uses [JWT tokens](/basic-config-and-security/security/authentication-authorization/json-web-tokens) for authentication. + +Users can configure JWT authentication by defining the following fields: + +- `jwt_signing_method`: Specify the method used to sign the JWT. Refer to the documentation on [JWT Signatures](/basic-config-and-security/security/authentication-authorization/json-web-tokens#signature-validation) for supported methods. +- `jwt_source`: Specify the public key used for verifying the JWT. +- `jwt_identity_base_field`: Define the identity source, typically set to `sub` (subject), which uniquely identifies the user or entity. +- `jwt_policy_field_name`: Specify the claim within the JWT payload that indicates the policy ID to apply. +- `jwt_default_policies` (Optional): Define default policies to apply if no policy claim is found in the JWT payload. + +The following example configures an API to use JWT authentication. 
It specifies the ECDSA signing method and public key, sets the `sub` claim as the identity source, uses the `pol` claim for policy ID, and assigns a default policy (`jwt-policy` SecurityPolicy in `default` namespace) if no policy is specified in the token. + +```yaml {hl_lines=["13-22"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-jwt1 +spec: + name: httpbin-jwt1 + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin-jwt1 + strip_listen_path: true + enable_jwt: true + strip_auth_data: true + jwt_signing_method: ecdsa + # ecdsa pvt: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ2V2WnpMMWdkQUZyODhoYjIKT0YvMk54QXBKQ3pHQ0VEZGZTcDZWUU8zMGh5aFJBTkNBQVFSV3oram42NUJ0T012ZHlIS2N2akJlQlNEWkgycgoxUlR3am1ZU2k5Ui96cEJudVE0RWlNbkNxZk1QV2lacUI0UWRiQWQwRTdvSDUwVnB1WjFQMDg3RwotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0t + # ecdsa pub: LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFRVZzL281K3VRYlRqTDNjaHluTDR3WGdVZzJSOQpxOVVVOEk1bUVvdlVmODZRWjdrT0JJakp3cW56RDFvbWFnZUVIV3dIZEJPNkIrZEZhYm1kVDlQT3hnPT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0t + jwt_source: LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFRVZzL281K3VRYlRqTDNjaHluTDR3WGdVZzJSOQpxOVVVOEk1bUVvdlVmODZRWjdrT0JJakp3cW56RDFvbWFnZUVIV3dIZEJPNkIrZEZhYm1kVDlQT3hnPT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0t + jwt_identity_base_field: sub + jwt_policy_field_name: pol + jwt_default_policies: + - default/jwt-policy +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: jwt-policy +spec: + access_rights_array: + - name: httpbin-jwt1 + namespace: default + versions: + - Default + active: true + name: jwt-policy + state: active +``` + +You can verify the API is properly authenticated with following command: + +1. 
JWT with default policy +```bash +curl http://localhost:8080/httpbin-jwt1/get -H 'Authorization: Bearer eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0IiwiaWF0IjoxNTE2MjM5MDIyfQ.rgPyrCJYs2im7zG6im5XUqsf_oAf_Kqk-F6IlLb3yzZCSZvrQObhBnkLKgfmVTbhQ5El7Q6KskXPal5-eZFuTQ' +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Host": "httpbin.org", + "Traceparent": "00-d2b93d763ca27f29181c8e508b5ac0c9-a446afa3bd053617-01", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-6696f0bf-1d9e532c6a2eb3a709e7086b" + }, + "origin": "127.0.0.1, 178.128.43.98", + "url": "http://httpbin.org/get" +} +``` + +2. JWT with explicit policy +```bash +curl http://localhost:8080/httpbin-jwt1/get -H 'Authorization: Bearer eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0IiwiaWF0IjoxNTE2MjM5MDIyLCJwb2wiOiJaR1ZtWVhWc2RDOXFkM1F0Y0c5c2FXTjUifQ.7nY9TvYgsAZqIHLhJdUPqZtzqU_5T-dcNtCt4zt8YPyUj893Z_NopL6Q8PlF8TlMdxUq1Ff8rt4-p8gVboIqlA' +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip", + "Host": "httpbin.org", + "Traceparent": "00-002adf6632ec20377cb7ccf6c3037e78-3c4cb97c70d790cb-01", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-6696f1dd-7f9de5f947c8c73279f7cca6" + }, + "origin": "127.0.0.1, 178.128.43.98", + "url": "http://httpbin.org/get" +} +``` + +#### Basic Authentication + +This configuration uses [Basic Authentication](/api-management/authentication/basic-authentication), requiring a username and password for access. + +```yaml {hl_lines=["13-13"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-basic-auth +spec: + name: Httpbin Basic Authentication + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + use_basic_auth: true +``` + +#### Custom Plugin Auth (go) + +This configuration uses a [Golang plugin](/api-management/plugins/golang#) for custom authentication. 
The following example shows how to create an API definition with a Golang custom plugin for `httpbin-go-auth`. + +For an example of Golang authentication middleware, see [Performing custom authentication with a Golang plugin](/api-management/plugins/golang#performing-custom-authentication-with-a-golang-plugin). + +```yaml {hl_lines=["7-7", "14-21"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-go-auth +spec: + name: httpbin-go-auth + use_go_plugin_auth: true # Turn on GO auth + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + custom_middleware: + driver: goplugin + pre: + - name: "AddFooBarHeader" + path: "/mnt/tyk-gateway/example-go-plugin.so" + auth_check: + name: "MyPluginCustomAuthCheck" + path: "/mnt/tyk-gateway/example-go-plugin.so" +``` + +#### Custom Plugin Auth (gRPC) + +This configuration uses a [gRPC plugin](/api-management/plugins/golang#) for custom authentication. The following example shows how to create an API definition with a gRPC custom plugin for `httpbin-grpc-auth`. + +For a detailed walkthrough on setting up Tyk with gRPC authentication plugins, refer to [Extending Tyk with gRPC Authentication Plugins](https://tyk.io/blog/how-to-setup-custom-authentication-middleware-using-grpc-and-java/). 
+ +```yaml {hl_lines=["9-9", "14-26"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-grpc-auth +spec: + name: httpbin-grpc-auth + protocol: http + active: true + enable_coprocess_auth: true + proxy: + target_url: http://httpbin.default.svc:8000 + listen_path: /httpbin-grpc-auth + strip_listen_path: true + custom_middleware: + driver: grpc + post_key_auth: + - name: "HelloFromPostKeyAuth" + path: "" + auth_check: + name: foo + path: "" + id_extractor: + extract_from: header + extract_with: value + extractor_config: + header_name: Authorization +``` + +#### Multiple (Chained) Auth + +This setup allows for [multiple authentication](/basic-config-and-security/security/authentication-authorization/multiple-auth) methods to be chained together, requiring clients to pass through each specified authentication provider. + +To enable multiple (chained) auth, you should set `base_identity_provided_by` field to one of the supported chained enums. Consult the [Multi (Chained) Authentication](/basic-config-and-security/security/authentication-authorization/multiple-auth) section for the supported auths. + +In this example, we are creating an API definition with basic authentication and mTLS with basic authentication as base identity for `httpbin-multiple-authentications`. 
+ +```yaml {hl_lines=["19-21"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin-multiple-authentications +spec: + name: Httpbin Multiple Authentications + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + base_identity_provided_by: basic_auth_user + use_basic_auth: true + use_mutual_tls_auth: true +``` + +#### IP Allowlist + +To enable [IP Allowlist](/api-management/gateway-config-tyk-classic#ip-access-control), set the following fields: + +* `enable_ip_whitelisting`: Enables IPs allowlist. When set to `true`, only requests coming from the explicit list of IP addresses defined in (`allowed_ips`) are allowed through. +* `allowed_ips`: A list of strings that defines the IP addresses (in [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) notation) that are allowed access via Tyk. + +In this example, only requests coming from 127.0.0.2 are allowed. + +```yaml {hl_lines=["10-12"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + enable_ip_whitelisting: true + allowed_ips: + - 127.0.0.2 + proxy: + target_url: http://httpbin.default.svc:8000 + listen_path: /httpbin + strip_listen_path: true +``` + +#### IP Blocklist + +To enable [IP Blocklist](/api-management/gateway-config-tyk-classic#ip-access-control), set the following fields: + +* `enable_ip_blacklisting`: Enables IPs blocklist. If set to `true`, requests coming from the explicit list of IP addresses (blacklisted_ips) are not allowed through. +* `blacklisted_ips`: A list of strings that defines the IP addresses (in [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) notation) that are blocked access via Tyk. 
This list is explicit and wildcards are currently not supported. + +In this example, requests coming from 127.0.0.2 will be forbidden (`403`). + +```yaml {hl_lines=["10-12"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + enable_ip_blacklisting: true + blacklisted_ips: + - 127.0.0.2 + proxy: + target_url: http://httpbin.default.svc:8000 + listen_path: /httpbin + strip_listen_path: true +``` + + +### Set Up Manifest for GraphQL +In the example below we can see that the configuration is contained within the `graphql` configuration object. A GraphQL schema is specified within the `schema` field and the execution mode is set to `proxyOnly`. The [GraphQL public playground](/api-management/graphql#enabling-public-graphql-playground) is enabled with the path set to `/playground`. + +```yaml {hl_lines=["15-17", "18-92"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: trevorblades +spec: + name: trevorblades + use_keyless: true + protocol: http + active: true + proxy: + target_url: https://countries.trevorblades.com + listen_path: /trevorblades + strip_listen_path: true + graphql: + enabled: true + version: "2" + execution_mode: proxyOnly + schema: | + directive @cacheControl(maxAge: Int, scope: CacheControlScope) on FIELD_DEFINITION | OBJECT | INTERFACE + + enum CacheControlScope { + PUBLIC + PRIVATE + } + + type Continent { + code: ID! + name: String! + countries: [Country!]! + } + + input ContinentFilterInput { + code: StringQueryOperatorInput + } + + type Country { + code: ID! + name: String! + native: String! + phone: String! + continent: Continent! + capital: String + currency: String + languages: [Language!]! + emoji: String! + emojiU: String! + states: [State!]! 
+ } + + input CountryFilterInput { + code: StringQueryOperatorInput + currency: StringQueryOperatorInput + continent: StringQueryOperatorInput + } + + type Language { + code: ID! + name: String + native: String + rtl: Boolean! + } + + input LanguageFilterInput { + code: StringQueryOperatorInput + } + + type Query { + continents(filter: ContinentFilterInput): [Continent!]! + continent(code: ID!): Continent + countries(filter: CountryFilterInput): [Country!]! + country(code: ID!): Country + languages(filter: LanguageFilterInput): [Language!]! + language(code: ID!): Language + } + + type State { + code: String + name: String! + country: Country! + } + + input StringQueryOperatorInput { + eq: String + ne: String + in: [String] + nin: [String] + regex: String + glob: String + } + + """The `Upload` scalar type represents a file upload.""" + scalar Upload + playground: + enabled: true + path: /playground +``` + +### Set Up Manifest for HTTP +#### HTTP Proxy + +This example creates a basic API definition that routes requests to listen path `/httpbin` to target URL `http://httpbin.org`. + +Traffic routing can be configured under `spec.proxy`: +- `target_url` defines the upstream address (or target URL) to which requests should be proxied. +- `listen_path` is the base path on Tyk to which requests for this API should be sent. Tyk listens out for any requests coming into the host at this path, on the port that Tyk is configured to run on and processes these accordingly. For example, `/api/` or `/` or `/httpbin/`. +- `strip_listen_path` removes the inbound listen path (as accessed by the client) when generating the outbound request for the upstream service. 
For example, consider the scenario where the Tyk base address is `http://acme.com/`, the listen path is `example/` and the upstream URL is `http://httpbin.org/`: If the client application sends a request to `http://acme.com/example/get` then the request will be proxied to `http://httpbin.org/get` + +```yaml {hl_lines=["10-13"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +#### HTTP Host-based Proxy + +`spec.domain` is the domain to bind this API to. This enforces domain matching for client requests. + +In this example, requests to `httpbin.tyk.io` will be proxied to upstream URL `http://httpbin.org` + +```yaml {hl_lines=["10-10"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: http + active: true + domain: httpbin.tyk.io + proxy: + target_url: http://httpbin.org + listen_path: / + strip_listen_path: true +``` + +#### HTTPS Proxy + +This example creates an API definition that routes requests to http://httpbin.org via port 8443. + +```yaml {hl_lines=["35-38"],linenos=false} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: { } +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: my-test-cert +spec: + secretName: my-test-tls + dnsNames: + - foo.com + - bar.com + privateKey: + rotationPolicy: Always + issuerRef: + name: selfsigned-issuer + # We can reference ClusterIssuers by changing the kind here. + # The default value is Issuer (i.e. a locally namespaced Issuer) + kind: Issuer + # This is optional since cert-manager will default to this value however + # if you are using an external issuer, change this to that issuer group. 
+ group: cert-manager.io +--- +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: httpbin +spec: + name: httpbin + use_keyless: true + protocol: https + listen_port: 8443 + certificate_secret_names: + - my-test-tls + active: true + proxy: + target_url: http://httpbin.org + listen_path: /httpbin + strip_listen_path: true +``` + +### Set Up Manifest for TCP + +This example creates an API definition that proxies requests from TCP port `6380` to `tcp://localhost:6379`. + +```yaml {hl_lines=["8-11"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: redis-tcp +spec: + name: redis-tcp + active: true + protocol: tcp + listen_port: 6380 + proxy: + target_url: tcp://localhost:6379 +``` + +### Set Up Manifest for UDG +#### UDG v2 (Tyk 3.2 and above) + +If you are on Tyk 3.2 and above, you can use the following manifest to create a UDG API. This example configures a Universal Data Graph from a [GraphQL datasource](/api-management/data-graph#graphql) and a [REST Datasource](/api-management/data-graph#rest). 
+ +```yaml {hl_lines=["20-39", "46-80"],linenos=false} +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: udg +spec: + name: Universal Data Graph v2a + use_keyless: true + protocol: http + active: true + proxy: + target_url: "" + listen_path: /udg + strip_listen_path: true + version_data: + default_version: Default + not_versioned: true + versions: + Default: + name: Default + graphql: + enabled: true + execution_mode: executionEngine + schema: | + type Country { + name: String + code: String + restCountry: RestCountry + } + + type Query { + countries: [Country] + } + + type RestCountry { + altSpellings: [String] + subregion: String + population: Int + } + version: "2" + last_schema_update: "2022-10-12T14:27:55.511+03:00" + type_field_configurations: [] + playground: + enabled: true + path: /playground + engine: + field_configs: + - disable_default_mapping: false + field_name: countries + path: + - "countries" + type_name: Query + - disable_default_mapping: true #very important for rest APIs + field_name: restCountry + path: [] + type_name: Country + data_sources: + - kind: "GraphQL" + name: "countries" + internal: false + root_fields: + - type: Query + fields: + - "countries" + config: + url: "https://countries.trevorblades.com/" + method: "POST" + headers: {} + body: "" + - kind: "REST" + internal: false + name: "restCountries" + root_fields: + - type: "Country" + fields: + - "restCountry" + config: + url: "https://restcountries.com/v2/alpha/{{ .object.code }}" + method: "GET" + body: "" + headers: {} +``` + +#### UDG v1 (Tyk 3.1 or before) + +If you are on Tyk 3.1, you can use the following manifest to create an UDG API. This example creates a Universal Data Graph with GraphQL datasource and HTTP JSON datasource. 
+ +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: ApiDefinition +metadata: + name: udg +spec: + name: Universal Data Graph Example + use_keyless: true + protocol: http + active: true + proxy: + target_url: "" + listen_path: /udg + strip_listen_path: true + graphql: + enabled: true + execution_mode: executionEngine + schema: | + type Country { + name: String + code: String + restCountry: RestCountry + } + + type Query { + countries: [Country] + } + + type RestCountry { + altSpellings: [String] + subregion: String + population: String + } + type_field_configurations: + - type_name: Query + field_name: countries + mapping: + disabled: false + path: countries + data_source: + kind: GraphQLDataSource + data_source_config: + url: "https://countries.trevorblades.com" + method: POST + status_code_type_name_mappings: [] + - type_name: Country + field_name: restCountry + mapping: + disabled: true + path: "" + data_source: + kind: HTTPJSONDataSource + data_source_config: + url: "https://restcountries.com/v2/alpha/{{ .object.code }}" + method: GET + default_type_name: RestCountry + status_code_type_name_mappings: + - status_code: 200 + playground: + enabled: true + path: /playground +``` + +## Set Up Tyk Streams API +Tyk Streams integrates natively with Tyk OpenAPI Specification (OAS), allowing you to manage APIs as code and automate processes in Kubernetes using Tyk Operator. Setting up Tyk Streams API is similar to configuring a standard Tyk OAS API. You can store the Tyk Streams OAS definition in a Kubernetes ConfigMap and connect it to Tyk Gateway through a `TykStreamsApiDefinition` resource. + +### Create your Tyk Streams API +#### Prepare the Tyk Streams API Definition +To create a Tyk Streams API, start by preparing a complete Tyk Streams API definition in the OpenAPI Specification (OAS) format. This file must include: + +- The `x-tyk-api-gateway` extension for Tyk-specific settings. +- The `x-tyk-streaming` extension for Tyk Streams configuration. 
+ +Here’s an example of a Tyk Streams API definition: + +```json {hl_lines=["17-54"],linenos=true} +{ + "info": { + "title": "Simple streaming demo", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "servers": [ + { + "url": "http://tyk-gw.local/streams/" + } + ], + "security": [], + "paths": {}, + "components": { + "securitySchemes": {} + }, + "x-tyk-streaming": { + "streams": { + "example-publisher": { + "input": { + "http_server": { + "allowed_verbs": [ + "POST" + ], + "path": "/pub", + "timeout": "1s" + } + }, + "output": { + "http_server": { + "ws_path": "/ws" + } + } + } + } + }, + "x-tyk-api-gateway": { + "info": { + "name": "Simple streaming demo", + "state": { + "active": true, + "internal": false + } + }, + "server": { + "listenPath": { + "strip": true, + "value": "/streams/" + } + }, + "upstream": { + "url": "https://not-needed" + } + } +} +``` + +#### Create a TykStreamsApiDefinition Custom Resource +Once your Tyk Streams API definition is ready, use a Kubernetes ConfigMap to store the definition and link it to a `TykStreamsApiDefinition` custom resource. + +Example manifest: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: TykStreamsApiDefinition +metadata: + name: simple-stream +spec: + tykStreams: + configmapRef: + name: simple-stream-cm #k8s resource name of configmap + namespace: default #The k8s namespace of the resource being targeted. If Namespace is not provided, + #we assume that the ConfigMap is in the same namespace as TykStreamsApiDefinition resource. 
+ keyName: test_stream.json +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: simple-stream-cm +data: + test_stream.json: |- + { + "components": {}, + "info": { + "title": "Simple streaming demo", + "version": "1.0.0" + }, + "openapi": "3.0.3", + "paths": {}, + "x-tyk-api-gateway": { + "info": { + "name": "Simple streaming demo", + "state": { + "active": true + } + }, + "server": { + "detailedTracing": { + "enabled": true + }, + "listenPath": { + "strip": true, + "value": "/streams/" + } + }, + "upstream": { + "url": "https://not-needed" + } + }, + "x-tyk-streaming": { + "streams": { + "example-publisher": { + "input": { + "http_server": { + "path": "/pub", + "allowed_verbs": ["POST"], + "timeout": "1s" + } + }, + "output": { + "http_server": { + "ws_path": "/ws" + } + } + } + } + } + } +``` + +#### Apply the TykStreamsApiDefinition Manifest + +Use the `kubectl` command to apply the `TykStreamsApiDefinition` manifest to your Kubernetes cluster: + +```sh +kubectl apply -f tyk-streams-api-definition.yaml +``` + +This will create a new `TykStreamsApiDefinition` resource. The Tyk Operator watches this resource and configures the Tyk Gateway or Tyk Dashboard with the new API. + +#### Verify the Tyk Streams API Creation + +Check the status of the `TykStreamsApiDefinition` resource to ensure that the API has been successfully created: + +```sh +kubectl get tykstreamsapidefinitions simple-stream +``` + +You should see output similar to this: + +```bash +NAME DOMAIN LISTENPATH ENABLED SYNCSTATUS +simple-stream /streams/ true Successful +``` + +#### Manage and Update the Tyk Streams API +To update your API configuration, modify the linked `ConfigMap`. The Tyk Operator will automatically detect changes and update the API in the Tyk Gateway. + +### Secure your Tyk Streams API +To secure your Tyk Streams API, configure security fields in the OAS definition just as you would for a standard Tyk OAS API. 
For more details, refer to the [Secure your Tyk OAS API](#secure-your-tyk-oas-api) guide. + +## Add a Security Policy to your API +To further protect access to your APIs, you will want to add a security policy. +Below, we take you through how to define the security policy but you can also find [Security Policy Example](/tyk-stack/tyk-operator/create-an-api#security-policy-example) below. + +### Define the Security Policy manifest + +To create a security policy, you must define a Kubernetes manifest using the `SecurityPolicy` CRD. The following example illustrates how to configure a default policy for trial users for a Tyk Classic API named `httpbin`, a Tyk OAS API named `petstore`, and a Tyk Streams API named `http-to-kafka`. + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: trial-policy # Unique Kubernetes name +spec: + name: Default policy for trial users # Descriptive name for the policy + state: active + active: true + access_rights_array: + - name: httpbin # Kubernetes name of referenced API + namespace: default # Kubernetes namespace of referenced API + kind: ApiDefinition # Omit this field or use `ApiDefinition` if you are referencing Tyk Classic API + versions: + - Default # The default version of Tyk Classic API is "Default" + - name: petstore + namespace: default + kind: TykOasApiDefinition # Use `TykOasApiDefinition` if you are referencing Tyk OAS API + versions: + - "" # The default version of Tyk OAS API is "" + - name: http-to-kafka + namespace: default + kind: TykStreamsApiDefinition # Use `TykStreamsApiDefinition` if you are referencing Tyk Streams API + versions: + - "" # The default version of Tyk Streams API is "" + quota_max: 1000 + quota_renewal_rate: 3600 + rate: 120 + per: 60 + throttle_interval: -1 + throttle_retry_limit: -1 +``` + +Save the manifest locally in a file, e.g. 
`trial-policy.yaml` + +In this example, we have defined a security policy as described below: + +**Define Security Policy status and metadata** + + - **`name`**: A descriptive name for the security policy. + - **`active`**: Marks the policy as active (true or false). + - **`state`**: The current state of the policy. It can have one of three values: + - **`active`**: Keys connected to this policy are enabled and new keys can be created. + - **`draft`**: Keys connected to this policy are disabled; no new keys can be created. + - **`deny`**: Policy is not published to Gateway; no keys can be created. + - **`tags`**: A list of tags to categorize or label the security policy, e.g. + + ```yaml + tags: + - Hello + - World + ``` + + - **`meta_data`**: Key-value pairs for additional metadata related to the policy, e.g. + + ```yaml + meta_data: + key: value + hello: world + ``` + +**Define Access Lists for APIs** + + - **`access_rights_array`**: Defines the list of APIs that the security policy applies to and the versions of those APIs. + - **`name`**: The Kubernetes metadata name of the API resource to which the policy grants access. + - **`namespace`**: The Kubernetes namespace where the API resource is deployed. + - **`kind`**: Tyk OAS APIs (`TykOasApiDefinition`), Tyk Streams (`TykStreamsApiDefinition`) and Tyk Classic APIs (`ApiDefinition`) can be referenced here. The API format can be specified by `kind` field. If omitted, `ApiDefinition` is assumed. + - **`versions`**: Specifies the API versions the policy will cover. If the API is not versioned, include the default version here. The default version of a Classic API is "Default". The default version of a Tyk OAS API is "". + +In this example, the security policy will apply to an `ApiDefinition` resource named `httpbin` in the `default` namespace, a `TykOasApiDefinition` resource named `petstore` in the `default` namespace, and a `TykStreamsApiDefinition` resource named `http-to-kafka` in the `default` namespace. 
Note that with Tyk Operator, you do not need to specify API ID as in the raw [Policy definition](/api-management/policies#policies-guide). Tyk Operator will automatically retrieve the API ID of referenced API Definition resources for you. + +**Define Rate Limits, Usage Quota, and Throttling** + +- **`rate`**: The maximum number of requests allowed per time period (Set to `-1` to disable). +- **`per`**: The time period (in seconds) for the rate limit (Set to `-1` to disable). +- **`throttle_interval`**: The interval (in seconds) between each request retry (Set to `-1` to disable). +- **`throttle_retry_limit`**: The maximum number of retry attempts allowed (Set to `-1` to disable). +- **`quota_max`**: The maximum number of requests allowed over a quota period (Set to `-1` to disable). +- **`quota_renewal_rate`**: The time, in seconds, after which the quota is renewed. + +In this example, trial users under this security policy can gain access to the `httpbin` API at a rate limit of maximum 120 times per 60 seconds (`"rate": 120, "per": 60`), with a usage quota of 1000 every hour (`"quota_max": 1000, "quota_renewal_rate": 3600`), without any request throttling (`throttle_interval: -1, throttle_retry_limit: -1`). + +### Apply the Security Policy manifest +Once you have defined your security policy manifest, apply it to your Kubernetes cluster using the `kubectl apply` command: + +```bash +kubectl apply -f trial-policy.yaml +``` + +### Verify the Security Policy + +After applying the manifest, you can verify that the security policy has been created successfully by running: + +```bash +kubectl describe securitypolicy trial-policy + +... 
+Status: + Latest CRD Spec Hash: 901732141095659136 + Latest Tyk Spec Hash: 5475428707334545086 + linked_apis: + Kind: ApiDefinition + Name: httpbin + Namespace: default + Kind: TykOasApiDefinition + Name: petstore + Namespace: default + Kind: TykStreamsApiDefinition + Name: http-to-kafka + Namespace: default + pol_id: 66e9a27bfdd3040001af6246 +Events: +``` + +From the `status` field, you can see that this security policy has been linked to `httpbin`, `petstore`, and `http-to-kafka` APIs. + + +### Security policy example + + +#### Key-level per-API rate limits and quota + +By configuring per-API limits, you can set specific rate limits, quotas, and throttling rules for each API in the access rights array. When these per-API settings are enabled, the API inherits the global limit settings unless specific limits and quotas are set in the `limit` field for that API. + +The following manifest defines a security policy with per-API rate limits and quotas for two APIs: `httpbin` and `petstore`. 
+ +```yaml {hl_lines=["15-21", "27-33", "40-41"],linenos=true} +apiVersion: tyk.tyk.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: policy-per-api-limits +spec: + name: Policy with Per API Limits + state: active + active: true + access_rights_array: + - name: httpbin # Kubernetes name of referenced API + namespace: default # Kubernetes namespace of referenced API + kind: ApiDefinition # `ApiDefinition` (Default), `TykOasApiDefinition` or `TykStreamsApiDefinition` + versions: + - Default # The default version of Tyk Classic API is "Default" + limit: # APILimit stores quota and rate limit on ACL level + rate: 10 # Max 10 requests per 60 seconds + per: 60 # Time period for rate limit + quota_max: 100 # Max 100 requests allowed over the quota period + quota_renewal_rate: 3600 # Quota renewal period in seconds (1 hour) + throttle_interval: -1 # No throttling between retries + throttle_retry_limit: -1 # No limit on request retries + - name: petstore + namespace: default + kind: TykOasApiDefinition # Use `TykOasApiDefinition` for Tyk OAS API + versions: + - "" # The default version of Tyk OAS API is "" + limit: + rate: 5 # Max 5 requests per 60 seconds + per: 60 # Time period for rate limit + quota_max: 100 # Max 100 requests allowed over the quota period + quota_renewal_rate: 3600 # Quota renewal period in seconds (1 hour) + throttle_interval: -1 # No throttling between retries + throttle_retry_limit: -1 # No limit on request retries + rate: -1 # Disable global rate limit + per: -1 # Disable global rate limit period + throttle_interval: -1 # Disable global throttling + throttle_retry_limit: -1 # Disable global retry limit + quota_max: -1 # Disable global quota + quota_renewal_rate: 60 # Quota renewal rate in seconds (1 minute) +``` + +With this security policy applied: + +For the `httpbin` API: +- The rate limit allows a maximum of 10 requests per 60 seconds. +- The quota allows a maximum of 100 requests per hour (3600 seconds). 
+- There is no throttling or retry limit (throttle_interval and throttle_retry_limit are set to -1). + +For the `petstore` API: +- The rate limit allows a maximum of 5 requests per 60 seconds. +- The quota allows a maximum of 100 requests per hour (3600 seconds). +- There is no throttling or retry limit (throttle_interval and throttle_retry_limit are set to -1). + +Global Rate Limits and Quota: +- All global limits (rate, quota, and throttling) are disabled (-1), so they do not apply. + +By setting per-API rate limits and quotas, you gain granular control over how each API is accessed and used, allowing you to apply different limits for different APIs as needed. This configuration is particularly useful when you want to ensure that critical APIs have stricter controls while allowing more flexibility for others. Use this example as a guideline to tailor your security policies to your specific requirements. + +#### Key-level per-endpoint rate limits + +By configuring key-level per-endpoint limits, you can restrict the request rate for specific API clients to a specific endpoint of an API. + +The following manifest defines a security policy with per-endpoint rate limits for two APIs: `httpbin` and `petstore`. 
+ +```yaml {hl_lines=["15-29", "35-49"],linenos=true} +apiVersion: tyk.tyk.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: policy-per-api-limits +spec: + name: Policy with Per API Limits + state: active + active: true + access_rights_array: + - name: httpbin # Kubernetes name of referenced API + namespace: default # Kubernetes namespace of referenced API + kind: ApiDefinition # `ApiDefinition` (Default), `TykOasApiDefinition` or `TykStreamsApiDefinition` + versions: + - Default # The default version of Tyk Classic API is "Default" + endpoints: # Per-endpoint rate limits + - path: /anything + methods: + - name: POST + limit: + rate: 5 + per: 60 + - name: PUT + limit: + rate: 5 + per: 60 + - name: GET + limit: + rate: 10 + per: 60 + - name: petstore + namespace: default + kind: TykOasApiDefinition # Use `TykOasApiDefinition` for Tyk OAS API + versions: + - "" # The default version of Tyk OAS API is "" + endpoints: # Per-endpoint rate limits + - path: /pet + methods: + - name: POST + limit: + rate: 5 + per: 60 + - name: PUT + limit: + rate: 5 + per: 60 + - name: GET + limit: + rate: 10 + per: 60 + rate: -1 # Disable global rate limit + per: -1 # Disable global rate limit period + throttle_interval: -1 # Disable global throttling + throttle_retry_limit: -1 # Disable global retry limit + quota_max: -1 # Disable global quota + quota_renewal_rate: 60 # Quota renewal rate in seconds (1 minute) +``` + +#### Path based permissions + + +You can secure your APIs by specifying [allowed URLs](/api-management/policies#secure-your-apis-by-method-and-path) (methods and paths) for each API within a security policy. This is done using the `allowed_urls` field under `access_rights_array`. + +The following manifest defines a security policy that allows access only to specific URLs and HTTP methods for two APIs: `httpbin`(a Tyk Classic API) and `petstore` (a Tyk OAS API). 
+ +```yaml {hl_lines=["15-18", "24-28"],linenos=true} +apiVersion: tyk.tyk.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: policy-with-allowed-urls +spec: + name: Policy with allowed URLs + state: active + active: true + access_rights_array: + - name: httpbin # Kubernetes name of referenced API + namespace: default # Kubernetes namespace of referenced API + kind: ApiDefinition # `ApiDefinition` (Default), `TykOasApiDefinition` or `TykStreamsApiDefinition` + versions: + - Default # The default version of Tyk Classic API is "Default" + allowed_urls: # Define allowed paths and methods + - url: /get # Only allow access to the "/get" path + methods: + - GET # Only allow the GET method + - name: petstore + namespace: default + kind: TykOasApiDefinition # Use `TykOasApiDefinition` for Tyk OAS API + versions: + - "" # The default version of Tyk OAS API is "" + allowed_urls: # Define allowed paths and methods + - url: "/pet/(.*)" # Allow access to any path starting with "/pet/" + methods: + - GET # Allow GET method + - POST # Allow POST method +``` + +With this security policy applied: + +- Allowed access: + - `curl -H "Authorization: Bearer $KEY_AUTH" http://tyk-gw.org/petstore/pet/10` returns a `200 OK` response. + - `curl -H "Authorization: Bearer $KEY_AUTH" http://tyk-gw.org/httpbin/get` returns a `200 OK` response. 
+ +- Restricted access: + - `curl -H "Authorization: Bearer $KEY_AUTH" http://tyk-gw.org/petstore/pet` returns a `403 Forbidden` response with the message: + + ```json + { "error": "Access to this resource has been disallowed" } + ``` + + - `curl -H "Authorization: Bearer $KEY_AUTH" http://tyk-gw.org/httpbin/anything` returns a `403 Forbidden` response with the message: + + ```json + { "error": "Access to this resource has been disallowed" } + ``` + +#### Partitioned policies + + +[Partitioned policies](/api-management/policies#partitioned-policies) allow you to selectively enforce different segments of a security policy, such as quota, rate limiting, access control lists (ACL), and GraphQL complexity rules. This provides flexibility in applying different security controls as needed. + +To configure a partitioned policy, set the segments you want to enable in the `partitions` field: + +```yaml +apiVersion: tyk.tyk.io/v1alpha1 +kind: SecurityPolicy +metadata: + name: partitioned-policy-example +spec: + name: Partitioned Policy Example + state: active + active: true + access_rights_array: + - name: httpbin # Kubernetes name of referenced API + namespace: default # Kubernetes namespace of referenced API + kind: ApiDefinition # `ApiDefinition` (Default), `TykOasApiDefinition` or `TykStreamsApiDefinition` + versions: + - Default # The default version of Tyk Classic API is "Default" + - name: petstore + namespace: default + kind: TykOasApiDefinition # Use `TykOasApiDefinition` if you are referencing Tyk OAS API + versions: + - "" # The default version of Tyk OAS API is "" + partitions: + quota: false # Do not enforce quota rules + rate_limit: false # Do not enforce rate limiting rules + acl: true # Enforce access control rules + complexity: false # Do not enforce GraphQL complexity rules +``` + +- **`quota`**: Set to true to enforce quota rules (limits the number of requests allowed over a period). 
+- **`rate_limit`**: Set to true to enforce rate limiting rules (limits the number of requests per second or minute).
+- **`acl`**: Set to true to enforce access control rules (controls which APIs or paths can be accessed).
+- **`complexity`**: Set to true to enforce GraphQL complexity rules (limits the complexity of GraphQL queries to prevent resource exhaustion).
+
+
+
+## Migrate Existing APIs to Tyk Operator
+
+If you have existing APIs and Policies running on your Tyk platform, and you want to start using Tyk Operator to manage them, you probably would not want to re-create the APIs and Policies on the platform using Operator CRDs. This is because you would lose the keys, policies, and analytics linked to the APIs. You can instead link existing APIs and Policies to a CRD by specifying the API ID or Policy ID in the CRD spec. This way, Operator will update the existing API or Policy according to the CRD spec. Any keys, policies and analytics linked to the API will continue to operate the same. This is great for idempotency.
+
+### Export existing configurations to CRDs
+
+Instead of creating the API and Policy CRDs from scratch, you can try exporting them from Dashboard using a snapshot tool. You can find the detailed usage guide [here](https://github.com/TykTechnologies/tyk-operator/blob/master/pkg/snapshot/README.md). This is great if you want to have a quick start. However, this is still a PoC feature so we recommend that you double-check the output files before applying them to your cluster.
+
+### Migration of existing API
+
+If there are existing APIs that you want to link to a CRD, it's very easy to do so. You need to simply add the `api_id` from your API Definition to the YAML of your `ApiDefinition` type. Then, the Operator will take care of the rest.
+
+Example:
+
+1. From the existing API Definition, grab the following field:
+
+```json
+"api_id": "5e0fac4845bb46c77543be28300fd9d7"
+```
+
+2. 
Simply add this value to your YAML, in the `spec.api_id` field:
+
+```yaml
+apiVersion: tyk.tyk.io/v1alpha1
+kind: ApiDefinition
+metadata:
+  name: my-existing-api
+spec:
+  api_id: 5e0fac4845bb46c77543be28300fd9d7
+  name: existing API
+  protocol: http
+  active: true
+  proxy:
+    target_url: http://httpbin.org
+    listen_path: /httpbin
+    strip_listen_path: true
+```
+
+3. Then apply your changes:
+
+```console
+$ kubectl apply -f config/samples/httpbin_protected.yaml
+apidefinition.tyk.tyk.io/my-existing-api created
+```
+
+
+
+The source of truth for the API definition is now the CRD, meaning it will override any differences in your existing API definition.
+
+
+
+### Migration of existing Policy
+If you have existing pre-Operator policies, you can easily link them to a CRD, which will allow you to modify them through the YAML moving forward.
+Simply set the `id` field in the SecurityPolicy YAML to the `_id` field in the existing Policy's JSON. This will allow the Operator to make the link.
+Note that the YAML becomes the source of truth and will overwrite any changes between it and the existing Policy.
+
+**Example**:
+1. Find out your existing Policy ID, e.g. `5f8f3933f56e1a5ffe2cd58c`
+
+2. Stick the policy ID `5f8f3933f56e1a5ffe2cd58c` into the YAML's `spec.id` field like below
+
+```yaml
+# my-security-policy.yaml
+apiVersion: tyk.tyk.io/v1alpha1
+kind: SecurityPolicy
+metadata:
+  name: new-httpbin-policy
+spec:
+  id: 5f8f3933f56e1a5ffe2cd58c
+  name: My New HttpBin Policy
+  state: active
+  active: true
+  access_rights_array:
+    - name: new-httpbin-api # name of your ApiDefinition object.
+      namespace: default # namespace of your ApiDefinition object.
+      versions:
+        - Default
+```
+
+The `spec.access_rights_array` field of the YAML must refer to the ApiDefinition object that the policy identified by the `id` will affect. 
+
+To find available ApiDefinition objects:
+
+```console
+$ kubectl get tykapis -A
+NAMESPACE   NAME              DOMAIN   LISTENPATH   PROXY.TARGETURL      ENABLED
+default     new-httpbin-api            /httpbin     http://httpbin.org   true
+```
+
+3. And then apply this file:
+
+```console
+$ kubectl apply -f my-security-policy.yaml
+securitypolicy.tyk.tyk.io/new-httpbin-policy created
+```
+
+Now the changes in the YAML have been applied to the existing Policy. You can now manage this policy through the CRD moving forward.
+Note: if this resource is unintentionally deleted, the Operator will recreate it with the same `id` field as above, allowing keys to continue to work as before the delete event.
+
+### Idempotency
+
+The ability to declaratively define the `api_id` gives us the ability to preserve Keys that are tied to APIs, or policies which are tied to APIs.
+Imagine any use case where you have keys tied to policies, and policies tied to APIs.
+Now imagine that these resources are unintentionally destroyed. Our database goes down, or our cluster, or something else.
+Well, using the Tyk Operator, we can easily re-generate all our resources in a non-destructive fashion. That's because the operator intelligently constructs the unique ID using the unique namespaced name of our CRD resources.
+Alternatively, if you don't explicitly state it, it will be hard-coded for you by Base64 encoding the namespaced name of the CRD.
+
+For example:
+
+1. We have keys tied to policies tied to APIs in production.
+2. Our production DB gets destroyed, and all our Policies and APIs are wiped.
+3. The Tyk Operator can resync all the changes from our CRDs into a new environment, by explicitly defining the Policy IDs and API IDs as before.
+4. This allows keys to continue to work normally as Tyk resources are generated idempotently through the Operator. 
+
+
diff --git a/tyk-stack/tyk-operator/installing-tyk-operator.mdx b/tyk-stack/tyk-operator/installing-tyk-operator.mdx
new file mode 100644
index 000000000..c1108f0dd
--- /dev/null
+++ b/tyk-stack/tyk-operator/installing-tyk-operator.mdx
@@ -0,0 +1,306 @@
+---
+title: "Install Tyk Operator"
+description: "Learn how to install Tyk Operator on Kubernetes to manage your Tyk API configurations"
+keywords: "Tyk Operator, Kubernetes, API Management"
+sidebarTitle: "Installation"
+---
+
+## Introduction
+
+We assume you have already installed Tyk. If you don't have it, check out the [Tyk
+Cloud](/tyk-cloud#quick-start-tyk-cloud) or [Tyk Self
+Managed](/tyk-self-managed) pages. [Tyk Helm
+Chart](/product-stack/tyk-charts/overview) is the preferred (and easiest) way to install Tyk on Kubernetes.
+
+In order for policy ID matching to work correctly, Dashboard must have `allow_explicit_policy_id` and
+`enable_duplicate_slugs` set to `true` and Gateway must have `policies.allow_explicit_policy_id` set to `true`.
+
+Tyk Operator needs a [user credential](/api-management/automations/operator#operator-user) to connect with
+Tyk Dashboard. The Operator user should have write access to the resources it is going to manage, e.g. APIs, Certificates,
+Policies, and Portal. It is recommended practice to turn off write access for other users for the above resources. See
+[Using Tyk Operator to enable GitOps with Tyk](/api-management/automations) for guidance on
+maintaining a single source of truth for your API configurations.
+
+## Install cert-manager
+
+Tyk Operator uses cert-manager to provision certificates for the webhook server. If you don't have cert-manager
+installed, you can run the following command to install it:
+
+Alternatively, you have the option to manually handle TLS certificates by disabling the `cert-manager` requirement. For more details, please refer to this [configuration](#webhook-configuration). 
+
+```console
+$ kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.8.0/cert-manager.yaml
+```
+
+Since Tyk Operator supports Kubernetes v1.19+, the minimum cert-manager version you can use is v1.8. If you run into
+cert-manager-related errors, please ensure that your desired Kubernetes version works with the chosen version
+of cert-manager by checking the [supported releases page](https://cert-manager.io/docs/installation/supported-releases/) and
+[cert-manager documentation](https://cert-manager.io/docs/installation/supported-releases/).
+
+Please wait for cert-manager to become available before continuing with the next step.
+
+## Option 1: Install Tyk Operator via Tyk's Umbrella Helm Charts
+
+If you are using [Tyk Stack](/product-stack/tyk-charts/tyk-stack-chart), [Tyk Control
+Plane](/product-stack/tyk-charts/tyk-control-plane-chart), or [Tyk Open
+Source Chart](/product-stack/tyk-charts/tyk-oss-chart), you can install Tyk Operator alongside other Tyk
+components by setting the value `global.components.operator` to `true`.
+
+Starting from Tyk Operator v1.0, a license key is required to use the Tyk Operator. You can provide it while installing
+Tyk Stack, Tyk Control Plane or Tyk OSS helm chart by setting the `global.license.operator` field. You can also set the license
+key via a Kubernetes secret using the `global.secrets.useSecretName` field. The secret should contain a key called
+`OperatorLicense`.
+
+Note: If you are using `global.secrets.useSecretName`, you must configure the operator license in the referenced Kubernetes secret. `global.license.operator` will not be used in this case.
+
+## Option 2: Install Tyk Operator via stand-alone Helm Chart
+
+If you prefer to install Tyk Operator separately, follow this section to install Tyk Operator using Helm. 
+ +### Configure Tyk Operator via environment variable or tyk-operator-conf secret + +Tyk Operator configurations can be set using `envVars` field of helm chart. See the table below for a list of expected +environment variable names and example values. + +```yaml +envVars: + - name: TYK_OPERATOR_LICENSEKEY + value: "{YOUR_LICENSE_KEY}" + - name: TYK_MODE + value: "pro" + - name: TYK_URL + value: "http://dashboard-svc-tyk-tyk-dashboard.tyk.svc:3000" + - name: TYK_AUTH + value: "2d095c2155774fe36d77e5cbe3ac963b" + - name: TYK_ORG + value: "5e9d9544a1dcd60001d0ed20" +``` + +It can also be set via a Kubernetes secret. The default K8s secret name is `tyk-operator-conf`. If you want to use +another name, configure it through Helm Chart [envFrom](#install-tyk-operator-and-custom-resource-definitions-crds) value. + +The Kubernetes secret or envVars field should set the following keys: + + + + + +| Key | Mandatory | Example Value | Description | +| :--------------------------- | :-------- | :-------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------- | +| TYK_OPERATOR_LICENSEKEY | Yes | `` | Tyk Operator license key | +| TYK_MODE | Yes | pro | β€œce” for Tyk Open Source mode, β€œpro” for Tyk licensed mode. | +| TYK_URL | Yes | http://dashboard-svc-tyk-tyk-dashboard.tyk.svc:3000 | Management URL of Tyk Gateway (Open Source) or Tyk Dashboard | +| TYK_AUTH | Yes | 2d095c2155774fe36d77e5cbe3ac963b | Operator user API key. | +| TYK_ORG | Yes | 5e9d9544a1dcd60001d0ed20 | Operator user ORG ID. | +| TYK_TLS_INSECURE_SKIP_VERIFY | No | true | Set to `β€œtrue”` if the Tyk URL is HTTPS and has a self-signed certificate. If it isn't set, the default value is `false`. | +| WATCH_NAMESPACE | No | foo,bar | Comma separated list of namespaces for Operator to operate on. The default is to operate on all namespaces if not specified. 
| +| WATCH_INGRESS_CLASS | No | customclass | Define the ingress class Tyk Operator should watch. Default is `tyk` | +| TYK_HTTPS_INGRESS_PORT | No | 8443 | Define the ListenPort for HTTPS ingress. Default is `8443`. | +| TYK_HTTP_INGRESS_PORT | No | 8080 | Define the ListenPort for HTTP ingress. Default is `8080`. | + + + + + +**Note**: From Tyk Operator v1.0, although Tyk Operator is compatible with the Open Source Tyk Gateway, a valid license +key is required for running Tyk Operator. + +| Key | Mandatory | Example Value | Description | +| :--------------------------- | :-------- | :------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------- | +| TYK_OPERATOR_LICENSEKEY | Yes | `` | Tyk Operator license key | +| TYK_MODE | Yes | ce | β€œce” for Tyk Open Source mode, β€œpro” for Tyk licensed mode. | +| TYK_URL | Yes | http://gateway-svc-tyk-ce-tyk-gateway.tyk.svc:8080 | Management URL of Tyk Gateway (Open Source) or Tyk Dashboard | +| TYK_AUTH | Yes | myapisecret | Operator user API key. | +| TYK_ORG | Yes | myorgid | Operator user ORG ID. | +| TYK_TLS_INSECURE_SKIP_VERIFY | No | true | Set to `β€œtrue”` if the Tyk URL is HTTPS and has a self-signed certificate. If it isn't set, the default value is `false`. | +| WATCH_NAMESPACE | No | foo,bar | Comma separated list of namespaces for Operator to operate on. The default is to operate on all namespaces if not specified. | +| WATCH_INGRESS_CLASS | No | customclass | Define the ingress class Tyk Operator should watch. Default is `tyk` | +| TYK_HTTPS_INGRESS_PORT | No | 8443 | Define the ListenPort for HTTPS ingress. Default is `8443`. | +| TYK_HTTP_INGRESS_PORT | No | 8080 | Define the ListenPort for HTTP ingress. Default is `8080`. 
| + + + + + +**Connect to Tyk Gateway or Dashboard** + +If you install Tyk using Helm Chart, `tyk-operator-conf` will have been created with the following keys: +`TYK_OPERATOR_LICENSEKEY, TYK_AUTH, TYK_MODE, TYK_ORG`, and `TYK_URL` by default. If you didn't use Helm Chart for +installation, please prepare `tyk-operator-conf` secret yourself using the commands below: + +```console +$ kubectl create namespace tyk-operator-system + +$ kubectl create secret -n tyk-operator-system generic tyk-operator-conf \ + --from-literal "TYK_OPERATOR_LICENSEKEY=${TYK_OPERATOR_LICENSEKEY}" \ + --from-literal "TYK_AUTH=${TYK_AUTH}" \ + --from-literal "TYK_ORG=${TYK_ORG}" \ + --from-literal "TYK_MODE=${TYK_MODE}" \ + --from-literal "TYK_URL=${TYK_URL}" +``` + + + +User API key and Organization ID can be found under "Add / Edit User" page within Tyk Dashboard. `TYK_AUTH` corresponds +to Tyk Dashboard API Access Credentials. `TYK_ORG` corresponds to Organization ID. + + + + + +If the credentials embedded in the `tyk-operator-conf` are ever changed or updated, the tyk-operator-controller-manager +pod must be restarted to pick up these changes. + + + +**Watch Namespaces** + +Tyk Operator is installed with cluster permissions. However, you can optionally control which namespaces it watches by +setting the `WATCH_NAMESPACE` through `tyk-operator-conf` secret or the environment variable to a comma separated list +of k8s namespaces. For example: + +- `WATCH_NAMESPACE=""` will watch for resources across the entire cluster. +- `WATCH_NAMESPACE="foo"` will watch for resources in the `foo` namespace. +- `WATCH_NAMESPACE="foo,bar"` will watch for resources in the `foo` and `bar` namespace. + +**Watch custom ingress class** + +You can configure [Tyk Operator as Ingress Controller](/product-stack/tyk-operator/tyk-ingress-controller) so +that [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resources can be managed by Tyk as +APIs. 
By default, Tyk Operator looks for the value `tyk` in Ingress resources `kubernetes.io/ingress.class` annotation +and will ignore all other ingress classes. If you want to override this default behavior, you may do so by setting +[WATCH_INGRESS_CLASS](#configure-tyk-operator-via-environment-variable-or-tyk-operator-conf-secret) through `tyk-operator-conf` or the environment variable. + +### Install Tyk Operator and Custom Resource Definitions (CRDs) + +You can install CRDs and Tyk Operator using the stand-alone Helm Chart by running the following command: + +```console +$ helm repo add tyk-helm https://helm.tyk.io/public/helm/charts/ +$ helm repo update + +$ helm install tyk-operator tyk-helm/tyk-operator -n tyk-operator-system +``` + +This process will deploy Tyk Operator and its required Custom Resource Definitions (CRDs) into your Kubernetes cluster +in `tyk-operator-system` namespace. + +**Helm configurations** + + + +Starting from Tyk Operator v1.2.0, `webhookPort` is deprecated in favor of `webhooks.port`. 
+ + + +| Key | Type | Default | +| :------------------------------------------- | :------ | :-------------------------------------- | +| envFrom[0].secretRef.name | string | `"tyk-operator-conf"` | +| envVars[0].name | string | `"TYK_OPERATOR_LICENSEKEY"` | +| envVars[0].value | string | `"{OPERATOR_LICENSEKEY}"` | +| envVars[1].name | string | `"TYK_HTTPS_INGRESS_PORT"` | +| envVars[1].value | string | `"8443"` | +| envVars[2].name | string | `"TYK_HTTP_INGRESS_PORT"` | +| envVars[2].value | string | `"8080"` | +| extraVolumeMounts | list | `[]` | +| extraVolumes | list | `[]` | +| fullnameOverride | string | `""` | +| healthProbePort | int | `8081` | +| hostNetwork | bool | `false` | +| image.pullPolicy | string | `"IfNotPresent"` | +| image.repository | string | `"tykio/tyk-operator"` | +| image.tag | string | `"v1.0.0"` | +| imagePullSecrets | list | `[]` | +| metricsPort | int | `8080` | +| nameOverride | string | `""` | +| nodeSelector | object | `{}` | +| podAnnotations | object | `{}` | +| podSecurityContext.allowPrivilegeEscalation | bool | `false` | +| rbac.port | int | `8443` | +| rbac.resources | object | `{}` | +| replicaCount | int | `1` | +| resources | object | `{}` | +| serviceMonitor | bool | `false` | +| webhookPort | int | `9443` | +| webhooks.enabled | bool | `true` | +| webhooks.port | int | `9443` | +| webhooks.annotations | object | `{}` | +| webhooks.tls.useCertManager | bool | `true` | +| webhooks.tls.secretName | string | `webhook-server-cert` | +| webhooks.tls.certificatesMountPath | string | `/tmp/k8s-webhook-server/serving-certs`| + +## Upgrading Tyk Operator + +### Upgrading from v0.x to v1.0+ + +Starting from Tyk Operator v1.0, a valid license key is required for the Tyk Operator to function. If Tyk Operator is +upgraded from v0.x versions to one of v1.0+ versions, Tyk Operator needs a valid license key that needs to be provided +during upgrade process. 
This section describes how to set Tyk Operator license key to make sure Tyk Operator continues +functioning. + +To provide the license key for Tyk Operator, Kubernetes secret used to configure Tyk Operator (typically named +tyk-operator-conf as described above) requires an additional field called `TYK_OPERATOR_LICENSEKEY`. Populate this field +with your Tyk Operator license key. + +To configure the license key: + +1. Locate the Kubernetes Secret used to configure Tyk Operator (typically named `tyk-operator-conf`). +2. Add a new field called `TYK_OPERATOR_LICENSEKEY` to this Secret. +3. Set the value of `TYK_OPERATOR_LICENSEKEY` to your Tyk Operator license key. + +After updating the Kubernetes secret with this field, proceed with the standard upgrade process outlined below. + +### Upgrading Tyk Operator and CRDs + +You can upgrade Tyk Operator through Helm Chart by running the following command: + +```console +$ helm upgrade -n tyk-operator-system tyk-operator tyk-helm/tyk-operator --wait +``` + +[Helm does not upgrade or delete CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations) +when performing an upgrade. Because of this restriction, an additional step is required when upgrading Tyk Operator with +Helm. + +```console +$ kubectl apply -f https://raw.githubusercontent.com/TykTechnologies/tyk-charts/refs/heads/main/tyk-operator-crds/crd-$TYK_OPERATOR_VERSION.yaml +``` + + + +Replace $TYK_OPERATOR_VERSION with the image tag corresponding to the Tyk Operator version to which +the Custom Resource Definitions (CRDs) belong. For example, to install CRDs compatible with Tyk Operator v1.0.0, set $TYK_OPERATOR_VERSION to v1.0.0. 
+ + + + +## Uninstalling Tyk Operator + +To uninstall Tyk Operator, you need to run the following command: + +```console +$ helm delete tyk-operator -n tyk-operator-system +``` + +## Webhook Configuration + +Starting from Operator v1.2.0 release, [Kubernetes Webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers) can now be configured using the Helm chart by specifying the necessary settings in the values.yaml file of the operator. +Since webhooks are enabled by default, there will be no impact to existing users. + +``` +webhooks: + enabled: true + port: 9443 + annotations: {} + tls: + useCertManager: true + secretName: webhook-server-cert + certificatesMountPath: "/tmp/k8s-webhook-server/serving-certs" +``` +- `enabled`: Enables or disables webhooks. +- `port`: Specifies the port for webhook communication. +- `annotations`: Allows adding custom annotations. +- `tls.useCertManager`: If true, Cert-Manager will handle TLS certificates. +- `tls.secretName`: The name of the Kubernetes Secret storing the TLS certificate. +- `tls.certificatesMountPath`: Path where the webhook server mounts its certificates. + diff --git a/tyk-stack/tyk-operator/publish-an-api.mdx b/tyk-stack/tyk-operator/publish-an-api.mdx new file mode 100644 index 000000000..e89e49876 --- /dev/null +++ b/tyk-stack/tyk-operator/publish-an-api.mdx @@ -0,0 +1,141 @@ +--- +title: "Publish an API to Developer Portal with Tyk Operator" +description: "Learn how to publish an API to the Tyk Developer Portal using Tyk Operator in Kubernetes, enabling third-party developers to access your APIs." +keywords: "Tyk Operator, Developer Portal, API Management, Kubernetes" +sidebarTitle: "Publish an API" +--- + +## Introduction + +For Tyk Self Managed or Tyk Cloud, you can set up a Developer Portal to expose a facade of your APIs and then allow third-party developers to register and use your APIs. 
+You can make use of Tyk Operator CRDs to publish the APIs as part of your CI/CD workflow. If you have followed this Getting Started guide to create the httpbin example API, you can publish it to your Tyk Classic Developer Portal in a few steps. + + + +Currently Operator only supports publishing Tyk Classic API to the Tyk Classic Portal. + + + +## Publish an API with Tyk Operator + +### 1. Creating a security policy + +When you publish an API to the Portal, Tyk actually publishes a way for developers to enroll in a policy, not into the API directly. Therefore, you should first set up a security policy for the developers, before proceeding with the publishing. + +To do that, you can use the following command: + +```yml +cat <|"Views accounts, + initiates payments"| tykFAPI + tpp -->|"Integrates with, + consumes APIs from"| tykFAPI + aspsp -->|"Configures, monitors, + provides services through"| tykFAPI + + %% Styling + classDef person fill:#335FFD,color:#F7F7FF,stroke:#9393AA + classDef system fill:#00A3A0,color:#F7F7FF,stroke:#03031C + class psu,tpp,aspsp person + class tykFAPISystem system +``` + +### Tyk FAPI Accelerator + +The diagram below shows all major components of the Tyk FAPI Accelerator and their interactions. 
+ +```mermaid +flowchart TB + %% People/Actors + psu(["PSU (Payment Services User) + A customer of the bank who accesses their accounts and initiates payments through third-party providers"]) + tpp(["TPP (Third Party Provider) + Companies providing financial services like account aggregators, credit checkers, and savings apps that integrate with banks"]) + + %% Tyk FAPI Accelerator System with Containers + subgraph tykFAPI ["Tyk FAPI Accelerator"] + tppApp["TPP Application + (NextJS) + Demonstrates how a TPP would interact with a bank's API"] + apiGateway["API Gateway + (Tyk Gateway) + Secures and routes API requests, enforces FAPI compliance, and handles event notifications"] + authServer["Authorization Server + (Keycloak) + Handles authentication and authorization"] + tykBank["Tyk Bank + (Node.js) + Mock bank implementation providing backend services"] + database[(Database)] + databaseLabel["PostgreSQL + Stores account information, payment data, and event subscriptions"] + kafka[(Message Broker)] + kafkaLabel["Kafka + Handles event notifications"] + end + + %% Connect labels to database and kafka + database --- databaseLabel + kafka --- kafkaLabel + + %% Relationships + psu -->|"Uses + (HTTPS)"| tppApp + tpp -->|"Develops + (IDE)"| tppApp + tppApp -->|"Makes API calls to + (HTTPS)"| apiGateway + tppApp -->|"Authenticates with + (OAuth 2.0/OIDC)"| authServer + apiGateway -->|"Routes requests to + (HTTPS)"| tykBank + authServer -->|"Verifies consents with + (HTTPS)"| tykBank + tykBank -->|"Reads from and writes to + (SQL)"| database + tykBank -->|"Publishes events to + (Kafka Protocol)"| kafka + + %% Event notification flow + kafka -->|"Subscribes to events"| apiGateway + apiGateway -->|"Sends signed notifications + (JWS/HTTPS Webhooks)"| tppApp + + %% Styling + classDef person fill:#335FFD,color:#F7F7FF,stroke:#9393AA + classDef tppStyle fill:#335FFD,color:#F7F7FF,stroke:#9393AA + classDef component fill:#00A3A0,color:#F7F7FF,stroke:#03031C + classDef authStyle 
fill:#00A3A0,color:#F7F7FF,stroke:#03031C + classDef bankStyle fill:#C01FB8,color:#F7F7FF,stroke:#03031C + classDef kafkaStyle fill:#E09D00,color:#F7F7FF,stroke:#03031C + classDef database fill:#5900CB,color:#F7F7FF,stroke:#03031C + classDef label fill:none,stroke:none + + class psu,tpp person + class tppApp tppStyle + class apiGateway component + class authServer authStyle + class tykBank bankStyle + class database database + class kafka kafkaStyle + class databaseLabel,kafkaLabel label +``` + +### Key Components + +1. **API Gateway (Tyk Gateway)**: + - Routes API requests to appropriate backend services + - Implements DPoP authentication via gRPC plugin + - Handles idempotency for payment requests + - Signs and delivers event notifications to TPPs + +2. **Authorization Server (Keycloak)**: + - Provides FAPI 2.0 compliant OAuth 2.0 and OpenID Connect + - Supports Pushed Authorization Requests (PAR) + - Manages user authentication and consent + +3. **Mock Bank Implementation**: + - Implements UK Open Banking Account Information API + - Implements UK Open Banking Payment Initiation API + - Implements UK Open Banking Event Subscriptions API + - Provides realistic testing environment + +4. **TPP Application**: + - Demonstrates how third parties integrate with the bank's APIs + - Implements FAPI 2.0 security profile + - Shows account information retrieval and payment initiation flows + +### Security Features + +The Tyk FAPI Accelerator implements several security features required for financial-grade APIs: + +1. **DPoP (Demonstrating Proof of Possession)**: + - Ensures the client possesses the private key corresponding to the public key in the token + - Prevents token theft and replay attacks + - Implemented as a gRPC plugin for Tyk Gateway + +2. **JWS Signing for Event Notifications**: + - Signs webhook notifications with JSON Web Signatures (JWS) + - Ensures authenticity and integrity of notifications + - Allows TPPs to verify the source of notifications + +3. 
**Idempotency Support**: + - Prevents duplicate transactions from repeated API calls + - Caches responses for idempotent requests + - Includes automatic garbage collection of expired entries + +4. **OAuth 2.0 with PAR**: + - Implements Pushed Authorization Requests for enhanced security + - Supports both automatic and manual authorization flows + - Complies with FAPI 2.0 security profile + + +## Getting Started + +For detailed setup instructions, code examples, and deployment guides, please refer to the [Tyk FAPI Accelerator GitHub repository](https://github.com/TykTechnologies/tyk-fapi/tree/main?tab=readme-ov-file#getting-started). + + +## Implementation Examples + +### Payment Flow Example + +The following sequence diagram illustrates a typical payment flow in the Tyk FAPI Accelerator: + +```mermaid +sequenceDiagram + actor User as End User + participant TPP as TPP Application + participant Gateway as API Gateway + participant Auth as Authorization Server + participant Bank as Tyk Bank + participant DB as Database + participant Kafka as Message Broker + + %% Payment Initiation + User->>TPP: 1. Initiate payment (amount, recipient) + + %% Payment Consent Creation + TPP->>Gateway: 2. Create payment consent + Gateway->>Bank: 3. Forward consent request + Bank->>DB: 4. Store consent + DB-->>Bank: 5. Return consent ID + Bank-->>Gateway: 6. Consent response with ConsentId + Gateway-->>TPP: 7. Return ConsentId + + %% Pushed Authorization Request (PAR) + TPP->>Auth: 8. Push Authorization Request (PAR) + Note right of TPP: Direct connection to Auth Server + Auth-->>TPP: 9. Return request_uri + + %% Authorization Options + TPP->>User: 10. Display authorization options + + %% Two possible authorization flows + alt Automatic Authorization + User->>TPP: 11a. Select automatic authorization + TPP->>Bank: 12a. Direct authorize consent request + Note right of TPP: Server-side authorization + Bank->>DB: 13a. Update consent status + DB-->>Bank: 14a. Confirm update + Bank-->>TPP: 15a. 
Authorization confirmation + else Manual Authorization + User->>TPP: 11b. Select manual authorization + TPP->>User: 12b. Redirect to authorization URL + User->>Auth: 13b. Authorization request with request_uri + Auth->>Bank: 14b. Verify consent + Bank->>DB: 15b. Get consent details + DB-->>Bank: 16b. Return consent details + Bank-->>Auth: 17b. Consent details + Auth->>User: 18b. Display authorization UI + User->>Auth: 19b. Approve authorization + Auth->>DB: 20b. Update consent status + DB-->>Auth: 21b. Confirm update + Auth->>User: 22b. Redirect to callback URL with code + User->>TPP: 23b. Callback with authorization code + end + + %% Payment Creation + TPP->>Gateway: 24. Create payment with authorized consent + Gateway->>Bank: 25. Forward payment request + Bank->>DB: 26. Store payment + DB-->>Bank: 27. Return payment ID + Bank-->>Gateway: 28. Payment response with PaymentId + Gateway-->>TPP: 29. Return PaymentId + + %% Payment Confirmation + TPP->>User: 30. Display payment confirmation + + %% Event Notification + Bank->>Kafka: 31. Publish payment event + Kafka-->>Gateway: 32. Consume payment event + Gateway->>DB: 33. Query subscriptions + DB-->>Gateway: 34. Return matching subscriptions + Gateway->>TPP: 35. Send signed payment notification + TPP-->>Gateway: 36. Acknowledge notification +``` + +### Event Notification Example + +The event notification system allows TPPs to receive updates about payment status changes: + +```mermaid +sequenceDiagram + participant TPP as TPP Application + participant Gateway as API Gateway + participant EventAPI as Event Subscriptions API + participant PaymentAPI as Payment Initiation API + participant DB as Database + participant Kafka as Message Broker + + %% Subscription Registration + TPP->>Gateway: 1. Register callback URL + Gateway->>EventAPI: 2. Forward registration request + EventAPI->>DB: 3. Store subscription + DB-->>EventAPI: 4. Return subscription ID + EventAPI-->>Gateway: 5. Registration response with SubscriptionId + Gateway-->>TPP: 6. 
Return SubscriptionId + + %% Event Generation + Note over PaymentAPI: Payment status change or other event occurs + PaymentAPI->>Kafka: 7. Publish event + Note right of PaymentAPI: Event includes type, subject, timestamp + + %% Event Processing + Kafka-->>Gateway: 8. Consume event + Gateway->>DB: 9. Query subscriptions for event type + DB-->>Gateway: 10. Return matching subscriptions + Gateway->>Gateway: 11. Determine target TPPs and sign with JWS + + %% Notification Delivery + Gateway->>TPP: 12. Send signed notification + Note right of Gateway: Notification includes event details, links, and JWS signature + TPP->>TPP: 13. Verify JWS signature + TPP-->>Gateway: 14. Acknowledge (HTTP 200 OK) + + %% Error Handling (Alternative Flow) + alt Delivery Failure + Gateway->>TPP: 12. Send signed notification + TPP--xGateway: 13. Failed delivery (timeout/error) + Gateway->>Gateway: 14. Retry with exponential backoff + Gateway->>TPP: 15. Retry notification + TPP->>TPP: 16. Verify JWS signature + TPP-->>Gateway: 17. Acknowledge (HTTP 200 OK) + end +```