diff --git a/.github/workflows/arc-publish-chart.yaml b/.github/workflows/arc-publish-chart.yaml
deleted file mode 100644
index 41e5c5b0fa..0000000000
--- a/.github/workflows/arc-publish-chart.yaml
+++ /dev/null
@@ -1,212 +0,0 @@
-name: Publish ARC Helm Charts
-
-# Refer to https://github.com/actions-runner-controller/releases#releases
-# for details on why we use this approach
-on:
-  push:
-    branches:
-    - master
-    paths:
-    - 'charts/**'
-    - '.github/workflows/arc-publish-chart.yaml'
-    - '!charts/actions-runner-controller/docs/**'
-    - '!charts/gha-runner-scale-set-controller/**'
-    - '!charts/gha-runner-scale-set/**'
-    - '!**.md'
-  workflow_dispatch:
-    inputs:
-      force:
-        description: 'Force publish even if the chart version is not bumped'
-        type: boolean
-        required: true
-        default: false
-
-env:
-  KUBE_SCORE_VERSION: 1.10.0
-  HELM_VERSION: v3.8.0
-
-permissions:
-  contents: write
-
-concurrency:
-  group: ${{ github.workflow }}
-  cancel-in-progress: true
-
-jobs:
-  lint-chart:
-    name: Lint Chart
-    runs-on: ubuntu-latest
-    outputs:
-      publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v3
-      with:
-        fetch-depth: 0
-
-    - name: Set up Helm
-      uses: azure/setup-helm@v3.4
-      with:
-        version: ${{ env.HELM_VERSION }}
-
-    - name: Set up kube-score
-      run: |
-        wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
-        chmod 755 kube-score
-
-    - name: Kube-score generated manifests
-      run: helm template  --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
-
-    # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-    - uses: actions/setup-python@v4
-      with:
-        python-version: '3.11'
-
-    - name: Set up chart-testing
-      uses: helm/chart-testing-action@v2.6.0
-
-    - name: Run chart-testing (list-changed)
-      id: list-changed
-      run: |
-        changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
-        if [[ -n "$changed" ]]; then
-          echo "changed=true" >> $GITHUB_OUTPUT
-        fi
-
-    - name: Run chart-testing (lint)
-      run: |
-        ct lint --config charts/.ci/ct-config.yaml
-
-    - name: Create kind cluster
-      if: steps.list-changed.outputs.changed == 'true'
-      uses: helm/kind-action@v1.4.0
-
-    # We need cert-manager already installed in the cluster because we assume the CRDs exist
-    - name: Install cert-manager
-      if: steps.list-changed.outputs.changed == 'true'
-      run: |
-        helm repo add jetstack https://charts.jetstack.io --force-update
-        helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
-
-    - name: Run chart-testing (install)
-      if: steps.list-changed.outputs.changed == 'true'
-      run: ct install --config charts/.ci/ct-config.yaml
-
-    # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
-    - name: Check if Chart Publish is Needed
-      id: publish-chart-step
-      run: |
-        CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
-        NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
-        RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases  | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
-        LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
-
-        echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
-        echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
-
-        # Always publish if force is true
-        if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
-          echo "publish=true" >> $GITHUB_OUTPUT
-        else
-          echo "publish=false" >> $GITHUB_OUTPUT
-        fi
-
-    - name: Job summary
-      run: |
-        echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
-        echo "" >> $GITHUB_STEP_SUMMARY
-        echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-        echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
-        echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
-        echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
-
-  publish-chart:
-    if: needs.lint-chart.outputs.publish-chart == 'true'
-    needs: lint-chart
-    name: Publish Chart
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write # for helm/chart-releaser-action to push chart release and create a release
-    env:
-      CHART_TARGET_ORG: actions-runner-controller
-      CHART_TARGET_REPO: actions-runner-controller.github.io
-      CHART_TARGET_BRANCH: master
-
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v3
-      with:
-        fetch-depth: 0
-
-    - name: Configure Git
-      run: |
-        git config user.name "$GITHUB_ACTOR"
-        git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
-
-    - name: Get Token
-      id: get_workflow_token
-      uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
-      with:
-        application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
-        application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
-        organization: ${{ env.CHART_TARGET_ORG }}
-
-    - name: Install chart-releaser
-      uses: helm/chart-releaser-action@v1.4.1
-      with:
-        install_only: true
-        install_dir: ${{ github.workspace }}/bin
-
-    - name: Package and upload release assets
-      run: |
-        cr package \
-          ${{ github.workspace }}/charts/actions-runner-controller/ \
-          --package-path .cr-release-packages
-
-        cr upload \
-          --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
-          --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
-          --package-path .cr-release-packages \
-          --token ${{ secrets.GITHUB_TOKEN }}
-
-    - name: Generate updated index.yaml
-      run: |
-        cr index \
-          --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
-          --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
-          --index-path ${{ github.workspace }}/index.yaml \
-          --token ${{ secrets.GITHUB_TOKEN }} \
-          --push \
-          --pages-branch 'gh-pages' \
-          --pages-index-path 'index.yaml'
-
-    # chart-releaser was never intended to publish to a different repo;
-    # this workaround moves the index.yaml to the target repo
-    # where the GitHub Pages site is hosted
-    - name: Checkout target repository
-      uses: actions/checkout@v3
-      with:
-        repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
-        path: ${{ env.CHART_TARGET_REPO }}
-        ref: ${{ env.CHART_TARGET_BRANCH }}
-        token: ${{ steps.get_workflow_token.outputs.token }}
-
-    - name: Copy index.yaml
-      run: |
-        cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
-
-    - name: Commit and push to target repository
-      run: |
-        git config user.name "$GITHUB_ACTOR"
-        git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
-        git add .
-        git commit -m "Update index.yaml"
-        git push
-      working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
-
-    - name: Job summary
-      run: |
-        echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
-        echo "" >> $GITHUB_STEP_SUMMARY
-        echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-        echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
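The deleted "Check if Chart Publish is Needed" step above reduces to a string comparison plus a force override. For reference, a minimal Go sketch of that decision (the version values are made up for illustration; this is not code from the repository):

```go
// Illustrative sketch only: the publish decision made by the
// "Check if Chart Publish is Needed" step, expressed as a function.
package main

import "fmt"

// shouldPublish mirrors the step's logic: publish when the chart version in
// master differs from the latest released version, or when force is set.
func shouldPublish(masterVersion, latestReleased string, force bool) bool {
	return masterVersion != latestReleased || force
}

func main() {
	fmt.Println(shouldPublish("0.23.7", "0.23.6", false)) // true: version was bumped
	fmt.Println(shouldPublish("0.23.6", "0.23.6", false)) // false: nothing new to publish
	fmt.Println(shouldPublish("0.23.6", "0.23.6", true))  // true: forced via workflow_dispatch
}
```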
diff --git a/.github/workflows/arc-publish.yaml b/.github/workflows/arc-publish.yaml
deleted file mode 100644
index 6a83f2a93c..0000000000
--- a/.github/workflows/arc-publish.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-name: Publish ARC Image
-
-# Refer to https://github.com/actions-runner-controller/releases#releases
-# for details on why we use this approach
-on:
-  release:
-    types:
-      - published
-  workflow_dispatch:
-    inputs:
-      release_tag_name:
-        description: 'Tag name of the release to publish'
-        required: true
-      push_to_registries:
-        description: 'Push images to registries'
-        required: true
-        type: boolean
-        default: false
-
-permissions:
- contents: write
- packages: write
-
-env:
-  TARGET_ORG: actions-runner-controller
-  TARGET_REPO: actions-runner-controller
-
-concurrency:
-  group: ${{ github.workflow }}
-  cancel-in-progress: true
-
-jobs:
-  release-controller:
-    name: Release
-    runs-on: ubuntu-latest
-    # gha-runner-scale-set has its own release workflow.
-    # We don't want to publish a new actions-runner-controller image
-    # when we release gha-runner-scale-set.
-    if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - uses: actions/setup-go@v4
-        with:
-          go-version-file: 'go.mod'
-
-      - name: Install tools
-        run: |
-          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
-          tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
-          sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
-          curl -s https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh | bash
-          sudo mv kustomize /usr/local/bin
-          curl -L -O https://github.com/tcnksm/ghr/releases/download/v0.13.0/ghr_v0.13.0_linux_amd64.tar.gz
-          tar zxvf ghr_v0.13.0_linux_amd64.tar.gz
-          sudo mv ghr_v0.13.0_linux_amd64/ghr /usr/local/bin
-
-      - name: Set version env variable
-        run: |
-          # Define the release tag name based on the event type
-          if [[ "${{ github.event_name }}" == "release" ]]; then
-            echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV
-          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
-            echo "VERSION=${{ inputs.release_tag_name }}" >> $GITHUB_ENV
-          fi
-
-      - name: Upload artifacts
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          make github-release
-
-      - name: Get Token
-        id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
-        with:
-          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
-          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
-          organization: ${{ env.TARGET_ORG }}
-
-      - name: Resolve push to registries
-        run: |
-          # Define the push to registries based on the event type
-          if [[ "${{ github.event_name }}" == "release" ]]; then
-            echo "PUSH_TO_REGISTRIES=true" >> $GITHUB_ENV
-          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
-            echo "PUSH_TO_REGISTRIES=${{ inputs.push_to_registries }}" >> $GITHUB_ENV
-          fi
-
-      - name: Trigger Build And Push Images To Registries
-        run: |
-          # Authenticate
-          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
-
-          # Trigger the workflow run
-          jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.VERSION }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \
-            | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
-
-      - name: Job summary
-        run: |
-          echo "The [publish-arc](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
-          echo "- Release tag: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml)" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/arc-release-runners.yaml b/.github/workflows/arc-release-runners.yaml
deleted file mode 100644
index 253793abb4..0000000000
--- a/.github/workflows/arc-release-runners.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-name: Release ARC Runner Images
-
-# Refer to https://github.com/actions-runner-controller/releases#releases
-# for details on why we use this approach
-on:
-  # We must trigger on push: rather than on a pull_request types: closed event
-  # so that GitHub Secrets are available to the workflow run
-  push:
-    branches:
-      - 'master'
-    paths:
-      - 'runner/VERSION'
-      - '.github/workflows/arc-release-runners.yaml'
-
-env:
-  # Safeguard to prevent pushing images to registries after build
-  PUSH_TO_REGISTRIES: true
-  TARGET_ORG: actions-runner-controller
-  TARGET_WORKFLOW: release-runners.yaml
-  DOCKER_VERSION: 24.0.7
-
-concurrency:
-  group: ${{ github.workflow }}
-  cancel-in-progress: true
-
-jobs:
-  build-runners:
-    name: Trigger Build and Push of Runner Images
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Get runner version
-        id: versions
-        run: |
-          runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
-          container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
-          echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
-          echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT
-
-      - name: Get Token
-        id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
-        with:
-          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
-          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
-          organization: ${{ env.TARGET_ORG }}
-
-      - name: Trigger Build And Push Runner Images To Registries
-        env:
-          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
-          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
-        run: |
-          # Authenticate
-          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
-
-          # Trigger the workflow run
-          gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
-            -f runner_version=${{ env.RUNNER_VERSION }} \
-            -f docker_version=${{ env.DOCKER_VERSION }} \
-            -f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
-            -f sha='${{ github.sha }}' \
-            -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}
-
-      - name: Job summary
-        env:
-          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
-          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
-        run: |
-          echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
-          echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
-          echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml)" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/arc-update-runners-scheduled.yaml b/.github/workflows/arc-update-runners-scheduled.yaml
deleted file mode 100644
index bce555c485..0000000000
--- a/.github/workflows/arc-update-runners-scheduled.yaml
+++ /dev/null
@@ -1,153 +0,0 @@
-# This workflow polls releases from actions/runner and, when a new one is available,
-# updates the files containing the runner version and opens a pull request.
-name: Runner Updates Check (Scheduled Job)
-
-on:
-  schedule:
-    # run daily
-    - cron: "0 9 * * *"
-  workflow_dispatch:
-
-jobs:
-  # check_versions compares our current version and the latest available runner
-  # version and sets them as outputs.
-  check_versions:
-    runs-on: ubuntu-latest
-    env:
-      GH_TOKEN: ${{ github.token }}
-    outputs:
-      runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
-      runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
-      container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
-      container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Get runner current and latest versions
-        id: runner_versions
-        run: |
-          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
-          echo "Current version: $CURRENT_VERSION"
-          echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
-
-          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
-          echo "Latest version: $LATEST_VERSION"
-          echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
-
-      - name: Get container-hooks current and latest versions
-        id: container_hooks_versions
-        run: |
-          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
-          echo "Current version: $CURRENT_VERSION"
-          echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
-
-          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
-          echo "Latest version: $LATEST_VERSION"
-          echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
-
-  # check_pr checks if a PR for the same update already exists. It only runs if
-  # the latest runner or container-hooks version differs from our current one.
-  # If no existing PR is found, it sets a PR name as output.
-  check_pr:
-    runs-on: ubuntu-latest
-    needs: check_versions
-    if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
-    outputs:
-      pr_name: ${{ steps.pr_name.outputs.pr_name }}
-    env:
-      GH_TOKEN: ${{ github.token }}
-    steps:
-      - name: debug
-        run: |
-          echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
-          echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
-          echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
-          echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
-
-      - uses: actions/checkout@v3
-
-      - name: PR Name
-        id: pr_name
-        env:
-          RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
-          RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
-          CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
-          CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
-        # Generate a PR name with the following title:
-        # Updates: runner to v2.304.0 and container-hooks to v0.3.1
-        run: |
-          RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
-          CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
-
-          PR_NAME="Updates:"
-          if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
-          then
-            PR_NAME="$PR_NAME $RUNNER_MESSAGE"
-          fi
-          if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
-          then
-            PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
-          fi
-
-          result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
-          if [ -z "$result" ]
-          then
-            echo "No existing PRs found, setting output with pr_name=$PR_NAME"
-            echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
-          else
-            echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
-          fi
-
-  # update_version updates runner version in the files listed below, commits
-  # the changes and opens a pull request as `github-actions` bot.
-  update_version:
-    runs-on: ubuntu-latest
-    needs:
-      - check_versions
-      - check_pr
-    if: needs.check_pr.outputs.pr_name
-    permissions:
-      pull-requests: write
-      contents: write
-      actions: write
-    env:
-      GH_TOKEN: ${{ github.token }}
-      RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
-      RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
-      CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
-      CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
-      PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: New branch
-        run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
-
-      - name: Update files
-        run: |
-          CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
-          LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
-
-          CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
-          LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
-          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
-
-      - name: Commit changes
-        run: |
-          # from https://github.com/orgs/community/discussions/26560
-          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
-          git config user.name "github-actions[bot]"
-          git add .
-          git commit -m "$PR_NAME"
-          git push -u origin HEAD
-
-      - name: Create pull request
-        run: gh pr create -f -l "runners update"
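The "Update files" step above escapes the dots in the version strings (`${VAR//./\\.}`) so sed matches them literally. A small Go sketch of the same substitution, with hypothetical values; regexp.QuoteMeta plays the role of the manual escaping:

```go
// Illustrative sketch only: the version substitution performed by the
// "Update files" step, with dot-escaping handled by regexp.QuoteMeta.
package main

import (
	"fmt"
	"regexp"
)

// bump replaces every literal occurrence of current with latest in content.
func bump(content, current, latest string) string {
	re := regexp.MustCompile(regexp.QuoteMeta(current)) // "2.303.0" matches literally, not "2x303y0"
	return re.ReplaceAllString(content, latest)
}

func main() {
	fmt.Println(bump("RUNNER_VERSION=2.303.0", "2.303.0", "2.304.0"))
	// Output: RUNNER_VERSION=2.304.0
}
```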
diff --git a/.github/workflows/arc-validate-chart.yaml b/.github/workflows/arc-validate-chart.yaml
deleted file mode 100644
index dc04eab45d..0000000000
--- a/.github/workflows/arc-validate-chart.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-name: Validate Helm Chart
-
-on:
-  pull_request:
-    branches:
-      - master
-    paths:
-      - 'charts/**'
-      - '.github/workflows/arc-validate-chart.yaml'
-      - '!charts/actions-runner-controller/docs/**'
-      - '!**.md'
-      - '!charts/gha-runner-scale-set-controller/**'
-      - '!charts/gha-runner-scale-set/**'
-  push:
-    paths:
-      - 'charts/**'
-      - '.github/workflows/arc-validate-chart.yaml'
-      - '!charts/actions-runner-controller/docs/**'
-      - '!**.md'
-      - '!charts/gha-runner-scale-set-controller/**'
-      - '!charts/gha-runner-scale-set/**'
-  workflow_dispatch:
-env:
-  KUBE_SCORE_VERSION: 1.10.0
-  HELM_VERSION: v3.8.0
-
-permissions:
-  contents: read
-
-concurrency:
-  # This will make sure we only apply the concurrency limits to pull requests,
-  # not to pushes to the master branch, by making the concurrency group name
-  # unique for pushes
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  validate-chart:
-    name: Lint Chart
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
-        with:
-          version: ${{ env.HELM_VERSION }}
-
-      - name: Set up kube-score
-        run: |
-          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
-          chmod 755 kube-score
-
-      - name: Kube-score generated manifests
-        run: helm template  --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-              --ignore-test pod-networkpolicy
-              --ignore-test deployment-has-poddisruptionbudget
-              --ignore-test deployment-has-host-podantiaffinity
-              --ignore-test container-security-context
-              --ignore-test pod-probes
-              --ignore-test container-image-tag
-              --enable-optional-test container-security-context-privileged
-              --enable-optional-test container-security-context-readonlyrootfilesystem
-
-      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v4
-        with:
-          python-version: '3.11'
-
-      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.6.0
-
-      - name: Run chart-testing (list-changed)
-        id: list-changed
-        run: |
-          changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
-          if [[ -n "$changed" ]]; then
-            echo "changed=true" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Run chart-testing (lint)
-        run: |
-          ct lint --config charts/.ci/ct-config.yaml
-
-      - name: Create kind cluster
-        uses: helm/kind-action@v1.4.0
-        if: steps.list-changed.outputs.changed == 'true'
-
-      # We need cert-manager already installed in the cluster because we assume the CRDs exist
-      - name: Install cert-manager
-        if: steps.list-changed.outputs.changed == 'true'
-        run: |
-          helm repo add jetstack https://charts.jetstack.io --force-update
-          helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
-
-      - name: Run chart-testing (install)
-        if: steps.list-changed.outputs.changed == 'true'
-        run: |
-          ct install --config charts/.ci/ct-config.yaml
diff --git a/.github/workflows/arc-validate-runners.yaml b/.github/workflows/arc-validate-runners.yaml
deleted file mode 100644
index 562320f642..0000000000
--- a/.github/workflows/arc-validate-runners.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Validate ARC Runners
-
-on:
-  pull_request:
-    branches:
-      - '**'
-    paths:
-      - 'runner/**'
-      - 'test/startup/**'
-      - '!**.md'
-
-permissions:
-  contents: read
-
-concurrency:
-  # This will make sure we only apply the concurrency limits to pull requests,
-  # not to pushes to the master branch, by making the concurrency group name
-  # unique for pushes
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  shellcheck:
-    name: runner / shellcheck
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: shellcheck
-        uses: reviewdog/action-shellcheck@v1
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          path: "./runner"
-          pattern: |
-            *.sh
-            *.bash
-            update-status
-          # Make this consistent with `make shellcheck`
-          shellcheck_flags: "--shell bash --source-path runner"
-          exclude: "./.git/*"
-          check_all_files_with_shebangs: "false"
-          # Set this to "true" once we have addressed all the shellcheck findings
-          fail_on_error: "false"
-  test-runner-entrypoint:
-    name: Test entrypoint
-    runs-on: ubuntu-latest
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v3
-
-    - name: Run tests
-      run: |
-        make acceptance/runner/startup
diff --git a/.github/workflows/gha-publish-chart.yaml b/.github/workflows/gha-publish-chart.yaml
deleted file mode 100644
index 8d893e0296..0000000000
--- a/.github/workflows/gha-publish-chart.yaml
+++ /dev/null
@@ -1,212 +0,0 @@
-name: (gha) Publish Helm Charts
-
-on:
-  workflow_dispatch:
-    inputs:
-      ref:
-        description: 'The branch, tag or SHA to cut a release from'
-        required: false
-        type: string
-        default: ''
-      release_tag_name:
-        description: 'The name to tag the controller image with'
-        required: true
-        type: string
-        default: 'canary'
-      push_to_registries:
-        description: 'Push images to registries'
-        required: true
-        type: boolean
-        default: false
-      publish_gha_runner_scale_set_controller_chart:
-        description: 'Publish new helm chart for gha-runner-scale-set-controller'
-        required: true
-        type: boolean
-        default: false
-      publish_gha_runner_scale_set_chart:
-        description: 'Publish new helm chart for gha-runner-scale-set'
-        required: true
-        type: boolean
-        default: false
-
-env:
-  HELM_VERSION: v3.8.0
-
-permissions:
-  packages: write
-
-concurrency:
-  group: ${{ github.workflow }}
-  cancel-in-progress: true
-
-jobs:
-  build-push-image:
-    name: Build and push controller image
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          # If inputs.ref is empty, it'll resolve to the default branch
-          ref: ${{ inputs.ref }}
-
-      - name: Check chart versions
-        # The binary version and the chart version need to match.
-        # Otherwise, in case of an upgrade, the controller will try to clean up
-        # resources with older versions that should already have been cleaned up
-        # during the upgrade process
-        run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}
-
-      - name: Resolve parameters
-        id: resolve_parameters
-        run: |
-          resolvedRef="${{ inputs.ref }}"
-          if [ -z "$resolvedRef" ]
-          then
-            resolvedRef="${{ github.ref }}"
-          fi
-          echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
-          echo "INFO: Resolving short SHA for $resolvedRef"
-          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
-          echo "INFO: Normalizing repository name (lowercase)"
-          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-        with:
-          # Pinning Buildx to v0.9.1 and BuildKit to v0.10.6;
-          # BuildKit v0.11 has a bug causing intermittent
-          # failures pushing images to GHCR
-          version: v0.9.1
-          driver-opts: image=moby/buildkit:v0.10.6
-
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Build & push controller image
-        uses: docker/build-push-action@v3
-        with:
-          file: Dockerfile
-          platforms: linux/amd64,linux/arm64
-          build-args: VERSION=${{ inputs.release_tag_name }}
-          push: ${{ inputs.push_to_registries }}
-          tags: |
-            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
-            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
-
-      - name: Job summary
-        run: |
-          echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
-          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-
-  publish-helm-chart-gha-runner-scale-set-controller:
-    if: ${{ inputs.publish_gha_runner_scale_set_controller_chart == true }}
-    needs: build-push-image
-    name: Publish Helm chart for gha-runner-scale-set-controller
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          # If inputs.ref is empty, it'll resolve to the default branch
-          ref: ${{ inputs.ref }}
-
-      - name: Resolve parameters
-        id: resolve_parameters
-        run: |
-          resolvedRef="${{ inputs.ref }}"
-          if [ -z "$resolvedRef" ]
-          then
-            resolvedRef="${{ github.ref }}"
-          fi
-          echo "INFO: Resolving short SHA for $resolvedRef"
-          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
-          echo "INFO: Normalizing repository name (lowercase)"
-          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
-
-      - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
-        with:
-          version: ${{ env.HELM_VERSION }}
-
-      - name: Publish new helm chart for gha-runner-scale-set-controller
-        run: |
-          echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
-          GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-controller/Chart.yaml | grep version: | cut -d " " -f 2)
-          echo "GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" >> $GITHUB_ENV
-          helm package charts/gha-runner-scale-set-controller/ --version="${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}"
-          helm push gha-runner-scale-set-controller-"${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
-
-      - name: Job summary
-        run: |
-          echo "New helm chart for gha-runner-scale-set-controller published successfully!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
-          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
-          echo "- gha-runner-scale-set-controller Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
-
-  publish-helm-chart-gha-runner-scale-set:
-    if: ${{ inputs.publish_gha_runner_scale_set_chart == true }}
-    needs: build-push-image
-    name: Publish Helm chart for gha-runner-scale-set
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          # If inputs.ref is empty, it'll resolve to the default branch
-          ref: ${{ inputs.ref }}
-
-      - name: Resolve parameters
-        id: resolve_parameters
-        run: |
-          resolvedRef="${{ inputs.ref }}"
-          if [ -z "$resolvedRef" ]
-          then
-            resolvedRef="${{ github.ref }}"
-          fi
-          echo "INFO: Resolving short SHA for $resolvedRef"
-          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
-          echo "INFO: Normalizing repository name (lowercase)"
-          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
-
-      - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
-        with:
-          version: ${{ env.HELM_VERSION }}
-
-      - name: Publish new helm chart for gha-runner-scale-set
-        run: |
-          echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
-
-          GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set/Chart.yaml | grep version: | cut -d " " -f 2)
-          echo "GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV
-          helm package charts/gha-runner-scale-set/ --version="${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}"
-          helm push gha-runner-scale-set-"${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
-
-      - name: Job summary
-        run: |
-          echo "New helm chart for gha-runner-scale-set published successfully!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
-          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
-          echo "- gha-runner-scale-set Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/gha-validate-chart.yaml b/.github/workflows/gha-validate-chart.yaml
deleted file mode 100644
index ab6735eca1..0000000000
--- a/.github/workflows/gha-validate-chart.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-name: (gha) Validate Helm Charts
-
-on:
-  pull_request:
-    branches:
-      - master
-    paths:
-      - 'charts/**'
-      - '.github/workflows/gha-validate-chart.yaml'
-      - '!charts/actions-runner-controller/**'
-      - '!**.md'
-  push:
-    paths:
-      - 'charts/**'
-      - '.github/workflows/gha-validate-chart.yaml'
-      - '!charts/actions-runner-controller/**'
-      - '!**.md'
-  workflow_dispatch:
-env:
-  KUBE_SCORE_VERSION: 1.16.1
-  HELM_VERSION: v3.8.0
-
-permissions:
-  contents: read
-
-concurrency:
-  # This will make sure we only apply the concurrency limits to pull requests,
-  # not to pushes to the master branch, by making the concurrency group name
-  # unique for pushes
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  validate-chart:
-    name: Lint Chart
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
-        with:
-          version: ${{ env.HELM_VERSION }}
-
-      - name: Set up kube-score
-        run: |
-          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
-          chmod 755 kube-score
-
-      - name: Kube-score generated manifests
-        run: helm template  --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-              --ignore-test pod-networkpolicy
-              --ignore-test deployment-has-poddisruptionbudget
-              --ignore-test deployment-has-host-podantiaffinity
-              --ignore-test container-security-context
-              --ignore-test pod-probes
-              --ignore-test container-image-tag
-              --enable-optional-test container-security-context-privileged
-              --enable-optional-test container-security-context-readonlyrootfilesystem
-
-      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v4
-        with:
-          python-version: '3.11'
-
-      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.6.0
-
-      - name: Run chart-testing (list-changed)
-        id: list-changed
-        run: |
-          ct version
-          changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml)
-          if [[ -n "$changed" ]]; then
-            echo "changed=true" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Run chart-testing (lint)
-        run: |
-          ct lint --config charts/.ci/ct-config-gha.yaml
-
-      - name: Set up docker buildx
-        uses: docker/setup-buildx-action@v2
-        if: steps.list-changed.outputs.changed == 'true'
-        with:
-          version: latest
-
-      - name: Build controller image
-        uses: docker/build-push-action@v3
-        if: steps.list-changed.outputs.changed == 'true'
-        with:
-          file: Dockerfile
-          platforms: linux/amd64
-          load: true
-          build-args: |
-            DOCKER_IMAGE_NAME=test-arc
-            VERSION=dev
-          tags: |
-            test-arc:dev
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
-
-      - name: Create kind cluster
-        uses: helm/kind-action@v1.4.0
-        if: steps.list-changed.outputs.changed == 'true'
-        with:
-          cluster_name: chart-testing
-
-      - name: Load image into cluster
-        if: steps.list-changed.outputs.changed == 'true'
-        run: |
-            export DOCKER_IMAGE_NAME=test-arc
-            export VERSION=dev
-            export IMG_RESULT=load
-            make docker-buildx
-            kind load docker-image test-arc:dev --name chart-testing
-
-      - name: Run chart-testing (install)
-        if: steps.list-changed.outputs.changed == 'true'
-        run: |
-          ct install --config charts/.ci/ct-config-gha.yaml
diff --git a/.github/workflows/global-publish-canary.yaml b/.github/workflows/global-publish-canary.yaml
index 11a89d5b43..ed3bbe8026 100644
--- a/.github/workflows/global-publish-canary.yaml
+++ b/.github/workflows/global-publish-canary.yaml
@@ -46,45 +46,6 @@ env:
   PUSH_TO_REGISTRIES: true
 
 jobs:
-  legacy-canary-build:
-    name: Build and Publish Legacy Canary Image
-    runs-on: ubuntu-latest
-    env:
-      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
-      TARGET_ORG: actions-runner-controller
-      TARGET_REPO: actions-runner-controller
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Get Token
-        id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
-        with:
-          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
-          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
-          organization: ${{ env.TARGET_ORG }}
-
-      - name: Trigger Build And Push Images To Registries
-        run: |
-          # Authenticate
-          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
-
-          # Trigger the workflow run
-          jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \
-            | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
-
-      - name: Job summary
-        run: |
-          echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
-          echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
-          echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
-
   canary-build:
     name: Build and Publish gha-runner-scale-set-controller Canary Image
     runs-on: ubuntu-latest
diff --git a/.github/workflows/global-run-first-interaction.yaml b/.github/workflows/global-run-first-interaction.yaml
deleted file mode 100644
index ce1139a581..0000000000
--- a/.github/workflows/global-run-first-interaction.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: First Interaction
-
-on:
-  issues:
-    types: [opened]
-  pull_request:
-    branches: [master]
-    types: [opened]
-
-jobs:
-  check_for_first_interaction:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/first-interaction@main
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          issue-message: |
-            Hello! Thank you for filing an issue.
-
-            The maintainers will triage your issue shortly.
-
-            In the meantime, please take a look at the [troubleshooting guide](https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md) for bug reports.
-            
-            If this is a feature request, please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md).
-          pr-message: |
-            Hello! Thank you for your contribution.
-
-            Please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md) to understand the project's testing and code conventions.
diff --git a/.github/workflows/global-run-stale.yaml b/.github/workflows/global-run-stale.yaml
deleted file mode 100644
index a84af45bf6..0000000000
--- a/.github/workflows/global-run-stale.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: Run Stale Bot
-on:
-  schedule:
-    - cron: '30 1 * * *'
-
-permissions:
-  contents: read
-
-jobs:
-  stale:
-    name: Run Stale
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write         # for actions/stale to close stale issues
-      pull-requests: write  # for actions/stale to close stale PRs
-    steps:
-      - uses: actions/stale@v6
-        with:
-          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
-          # turn off stale for both issues and PRs
-          days-before-stale: -1
-          # turn stale back on for issues only
-          days-before-issue-stale: 30
-          days-before-issue-close: 14
-          exempt-issue-labels: 'pinned,security,enhancement,refactor,documentation,chore,bug,dependencies,needs-investigation'
diff --git a/cmd/ghalistener/app/app.go b/cmd/ghalistener/app/app.go
index e21703c978..ce28ca3528 100644
--- a/cmd/ghalistener/app/app.go
+++ b/cmd/ghalistener/app/app.go
@@ -11,6 +11,7 @@ import (
 	"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -105,6 +106,9 @@ func New(config config.Config) (*App, error) {
 }
 
 func (app *App) Run(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "App.Run")
+	defer span.End()
+
 	var errs []error
 	if app.worker == nil {
 		errs = append(errs, fmt.Errorf("worker not initialized"))
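The `otel.Tracer("arc")` spans added in `App.Run` (and throughout the listener below) are no-ops unless a tracer provider is registered somewhere in the process; that wiring is not shown in this diff. A minimal sketch of what such setup might look like, with the exporter choice and service wiring assumed rather than taken from the codebase:

```go
// Assumed wiring, not shown in this diff: register a global tracer provider so
// that otel.Tracer("arc").Start(...) calls actually record spans.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	exp, err := stdouttrace.New() // stand-in exporter; a real deployment would likely use OTLP
	if err != nil {
		log.Fatal(err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()

	// Register globally so otel.Tracer("arc") used in App.Run and the listener is not a no-op.
	otel.SetTracerProvider(tp)

	_, span := otel.Tracer("arc").Start(ctx, "App.Run")
	span.End()
}
```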
diff --git a/cmd/ghalistener/app/mocks/listener.go b/cmd/ghalistener/app/mocks/listener.go
index c177ace62a..d2ab3b1ee5 100644
--- a/cmd/ghalistener/app/mocks/listener.go
+++ b/cmd/ghalistener/app/mocks/listener.go
@@ -7,6 +7,7 @@ import (
 
 	listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
 	mock "github.com/stretchr/testify/mock"
+	"go.opentelemetry.io/otel"
 )
 
 // Listener is an autogenerated mock type for the Listener type
@@ -16,6 +17,9 @@ type Listener struct {
 
 // Listen provides a mock function with given fields: ctx, handler
 func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
+	defer span.End()
+
 	ret := _m.Called(ctx, handler)
 
 	var r0 error
diff --git a/cmd/ghalistener/app/mocks/worker.go b/cmd/ghalistener/app/mocks/worker.go
index 9f24819df1..c8ae450698 100644
--- a/cmd/ghalistener/app/mocks/worker.go
+++ b/cmd/ghalistener/app/mocks/worker.go
@@ -4,6 +4,7 @@ package mocks
 
 import (
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	context "context"
 
@@ -17,6 +18,9 @@ type Worker struct {
 
 // HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
 func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
+	defer span.End()
+
 	ret := _m.Called(ctx, count, acquireCount)
 
 	var r0 int
@@ -41,6 +45,9 @@ func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acqui
 
 // HandleJobStarted provides a mock function with given fields: ctx, jobInfo
 func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
+	defer span.End()
+
 	ret := _m.Called(ctx, jobInfo)
 
 	var r0 error
diff --git a/cmd/ghalistener/listener/listener.go b/cmd/ghalistener/listener/listener.go
index a9cf0838a6..d97d71499a 100644
--- a/cmd/ghalistener/listener/listener.go
+++ b/cmd/ghalistener/listener/listener.go
@@ -7,12 +7,16 @@ import (
 	"fmt"
 	"net/http"
 	"os"
+	"sync"
 	"time"
 
 	"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	"github.com/google/uuid"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
 )
 
 const (
@@ -125,6 +129,9 @@ type Handler interface {
 // The handler is responsible for handling the initial message and subsequent messages.
 // If an error occurs during any step, Listen returns an error.
 func (l *Listener) Listen(ctx context.Context, handler Handler) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
+	defer span.End()
+
 	if err := l.createSession(ctx); err != nil {
 		return fmt.Errorf("createSession failed: %w", err)
 	}
@@ -160,34 +167,149 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
 		default:
 		}
 
+		ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen.loop", trace.WithNewRoot())
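+		// Each polling iteration is traced as its own root span; it must be ended on every exit path below.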
+
 		msg, err := l.getMessage(ctx)
 		if err != nil {
+			span.End()
 			return fmt.Errorf("failed to get message: %w", err)
 		}
 
 		if msg == nil {
 			_, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
 			if err != nil {
+				span.End()
 				return fmt.Errorf("handling nil message failed: %w", err)
 			}
 
+			span.End()
 			continue
 		}
 
 		// Remove cancellation from the context to avoid cancelling the message handling.
 		if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
+			span.End()
 			return fmt.Errorf("failed to handle message: %w", err)
 		}
+
+		span.End()
+	}
+}
+
+type tracedJob struct {
+	jobSpan             tracer.Span
+	runnerSetAssignSpan tracer.Span
+	runnerAssignSpan    tracer.Span
+	runnerRunJobSpan    tracer.Span
+}
+
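+// tracedJobs tracks the in-flight Datadog spans for each runner request ID across messages; mu guards concurrent access.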
+var mu sync.Mutex
+var tracedJobs map[string]*tracedJob
+
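+// progressTraces advances the Datadog trace for each job through its lifecycle:
+// jobsAvailable starts the job and runnerSetAssign spans, jobsStarted finishes
+// runnerSetAssign and opens the runnerAssign/runnerRunJob spans, and jobsCompleted
+// finishes runnerRunJob and the job span before dropping the entry from tracedJobs.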
+func (l *Listener) progressTraces(parsedMsg *parsedMessage) {
+	mu.Lock()
+	defer mu.Unlock()
+
+	if tracedJobs == nil {
+		tracedJobs = make(map[string]*tracedJob)
+	}
+
+	for _, j := range parsedMsg.jobsAvailable {
+		jobSpan := tracer.StartSpan(
+			"GitHub Actions Workflow Run",
+			tracer.StartTime(j.QueueTime),
+			tracer.Tag("runner_request_id", fmt.Sprintf("%d", j.RunnerRequestId)),
+			tracer.Tag("repository_name", j.RepositoryName),
+			tracer.Tag("owner_name", j.OwnerName),
+			tracer.Tag("workflow_ref", fmt.Sprintf("%s", j.JobWorkflowRef)),
+			tracer.Tag("workflow_run_id", fmt.Sprintf("%d", j.WorkflowRunId)),
+		)
+
+		runnerSetAssignSpan := tracer.StartSpan(
+			"runnerSetAssign",
+			tracer.ChildOf(jobSpan.Context()),
+			tracer.StartTime(j.QueueTime),
+			tracer.Tag("runner_request_id", fmt.Sprintf("%d", j.RunnerRequestId)),
+			tracer.Tag("repository_name", j.RepositoryName),
+			tracer.Tag("owner_name", j.OwnerName),
+		)
+
+		reqID := fmt.Sprintf("%d", j.RunnerRequestId)
+		tracedJobs[reqID] = &tracedJob{
+			jobSpan:             jobSpan,
+			runnerSetAssignSpan: runnerSetAssignSpan,
+		}
+
+		l.logger.Info("Listener.progressTraces: Job available", "queueTime", j.QueueTime, "scaleSetAssignTime", j.ScaleSetAssignTime, "requestLabels", j.RequestLabels, "now", time.Now())
+	}
+
+	for _, j := range parsedMsg.jobsStarted {
+		reqID := fmt.Sprintf("%d", j.RunnerRequestId)
+		t := tracedJobs[reqID]
+		if t == nil {
+			s := tracer.StartSpan(fmt.Sprintf("%s", j.JobWorkflowRef), tracer.StartTime(j.QueueTime))
+			tracedJobs[reqID] = &tracedJob{jobSpan: s}
+
+			l.logger.Error(errors.New("job and runnerSetAssign spans have not started yet"), "Listener.progressTraces: job started without an existing trace", "runnerRequestId", j.RunnerRequestId)
+		} else {
+			if t.runnerSetAssignSpan == nil {
+				l.logger.Error(errors.New("runnerSetAssignSpan has not started yet"), "Listener.progressTraces: skipping finish of runnerSetAssign span", "runnerRequestId", j.RunnerRequestId)
+			} else {
+				t.runnerSetAssignSpan.Finish(tracer.FinishTime(j.RunnerAssignTime))
+			}
+
+			t.runnerAssignSpan = tracer.StartSpan(
+				"runnerAssign",
+				tracer.ChildOf(t.jobSpan.Context()),
+				tracer.StartTime(j.RunnerAssignTime),
+			)
+			now := time.Now()
+			t.runnerAssignSpan.Finish(tracer.FinishTime(now))
+
+			t.runnerRunJobSpan = tracer.StartSpan(
+				"runnerRunJob",
+				tracer.ChildOf(t.jobSpan.Context()),
+				tracer.StartTime(now),
+			)
+
+			l.logger.Info("Listener.progressTraces: Job started", "queueTime", j.QueueTime, "runnerAssignTime", j.RunnerAssignTime, "requestLabels", j.RequestLabels, "now", now)
+		}
+	}
+
+	for _, j := range parsedMsg.jobsCompleted {
+		reqID := fmt.Sprintf("%d", j.RunnerRequestId)
+		t := tracedJobs[reqID]
+		if t == nil {
+			s := tracer.StartSpan(j.JobWorkflowRef, tracer.StartTime(j.QueueTime))
+			t = &tracedJob{jobSpan: s}
+			tracedJobs[reqID] = t
+
+			l.logger.Error(errors.New("job, runnerSetAssign and runnerAssign spans have not started yet"), "cannot progress trace for completed job", "runnerRequestId", j.RunnerRequestId)
+		} else {
+			if t.runnerRunJobSpan == nil {
+				l.logger.Error(errors.New("runnerRunJobSpan has not started yet"), "cannot finish runnerRunJob span", "runnerRequestId", j.RunnerRequestId)
+			} else {
+				t.runnerRunJobSpan.Finish(tracer.FinishTime(j.FinishTime))
+			}
+		}
+		s := t.jobSpan
+		s.Finish(tracer.FinishTime(j.FinishTime))
+		delete(tracedJobs, reqID)
 	}
 }
 
 func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.handleMessage")
+	defer span.End()
+
 	parsedMsg, err := l.parseMessage(ctx, msg)
 	if err != nil {
 		return fmt.Errorf("failed to parse message: %w", err)
 	}
 	l.metrics.PublishStatistics(parsedMsg.statistics)
 
+	l.progressTraces(parsedMsg)
+
 	if len(parsedMsg.jobsAvailable) > 0 {
 		acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
 		if err != nil {
@@ -223,6 +352,9 @@ func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *acti
 }
 
 func (l *Listener) createSession(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.createSession")
+	defer span.End()
+
 	var session *actions.RunnerScaleSetSession
 	var retries int
 
@@ -268,6 +400,9 @@ func (l *Listener) createSession(ctx context.Context) error {
 }
 
 func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.getMessage")
+	defer span.End()
+
 	l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
 	msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
 	if err == nil { // if NO error
@@ -294,6 +429,9 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa
 }
 
 func (l *Listener) deleteLastMessage(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.deleteLastMessage")
+	defer span.End()
+
 	l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
 	err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
 	if err == nil { // if NO error
@@ -325,6 +463,9 @@ type parsedMessage struct {
 }
 
 func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.parseMessage")
+	defer span.End()
+
 	if msg.MessageType != "RunnerScaleSetJobMessages" {
 		l.logger.Info("Skipping message", "messageType", msg.MessageType)
 		return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
@@ -398,6 +539,9 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
 }
 
 func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.acquireAvailableJobs", trace.WithLinks())
+	defer span.End()
+
 	ids := make([]int64, 0, len(jobsAvailable))
 	for _, job := range jobsAvailable {
 		ids = append(ids, job.RunnerRequestId)
@@ -428,6 +572,9 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*ac
 }
 
 func (l *Listener) refreshSession(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.refreshSession")
+	defer span.End()
+
 	l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
 	session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
 	if err != nil {
diff --git a/cmd/ghalistener/listener/mocks/client.go b/cmd/ghalistener/listener/mocks/client.go
index a36c934401..0cfdd15e3e 100644
--- a/cmd/ghalistener/listener/mocks/client.go
+++ b/cmd/ghalistener/listener/mocks/client.go
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 
@@ -19,6 +20,9 @@ type Client struct {
 
 // AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
 func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.AcquireJobs")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
 
 	var r0 []int64
@@ -45,6 +49,9 @@ func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, message
 
 // CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
 func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.CreateMessageSession")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, owner)
 
 	var r0 *actions.RunnerScaleSetSession
@@ -71,6 +78,9 @@ func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int
 
 // DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
 func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessage")
+	defer span.End()
+
 	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
 
 	var r0 error
@@ -85,6 +95,9 @@ func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, mes
 
 // DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
 func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessageSession")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, sessionId)
 
 	var r0 error
@@ -99,6 +112,9 @@ func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int
 
 // GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
 func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetAcquirableJobs")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId)
 
 	var r0 *actions.AcquirableJobList
@@ -125,6 +141,9 @@ func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (
 
 // GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
 func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetMessage")
+	defer span.End()
+
 	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
 
 	var r0 *actions.RunnerScaleSetMessage
@@ -151,6 +170,9 @@ func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messag
 
 // RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
 func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.RefreshMessageSession")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, sessionId)
 
 	var r0 *actions.RunnerScaleSetSession
diff --git a/cmd/ghalistener/listener/mocks/handler.go b/cmd/ghalistener/listener/mocks/handler.go
index b910d79f9e..195630bcc2 100644
--- a/cmd/ghalistener/listener/mocks/handler.go
+++ b/cmd/ghalistener/listener/mocks/handler.go
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 )
@@ -17,6 +18,9 @@ type Handler struct {
 
 // HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
 func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleDesiredRunnerCount")
+	defer span.End()
+
 	ret := _m.Called(ctx, count, jobsCompleted)
 
 	var r0 int
@@ -41,6 +45,9 @@ func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobs
 
 // HandleJobStarted provides a mock function with given fields: ctx, jobInfo
 func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleJobStarted")
+	defer span.End()
+
 	ret := _m.Called(ctx, jobInfo)
 
 	var r0 error
diff --git a/cmd/ghalistener/main.go b/cmd/ghalistener/main.go
index 10436b30e4..f2f40827da 100644
--- a/cmd/ghalistener/main.go
+++ b/cmd/ghalistener/main.go
@@ -10,9 +10,32 @@ import (
 
 	"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
 	"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
+
+	"go.opentelemetry.io/otel"
+	ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+	"go.opentelemetry.io/otel/log/global"
+	otellog "go.opentelemetry.io/otel/sdk/log"
 )
 
 func main() {
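+	// Bridge OpenTelemetry to Datadog: spans created via otel.Tracer are exported
+	// through dd-trace-go's tracer provider.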
+	provider := ddotel.NewTracerProvider()
+	defer provider.Shutdown()
+
+	otel.SetTracerProvider(provider)
+
+	loggerProvider, err := newLoggerProvider()
+	if err != nil {
+		log.Fatalf("failed to initialize OpenTelemetry logger provider: %v", err)
+	}
+
+	global.SetLoggerProvider(loggerProvider)
+
+	log.Printf("Enabled OpenTelemetry Tracing")
+
 	configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
 	if !ok {
 		fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
@@ -38,3 +61,17 @@ func main() {
 		os.Exit(1)
 	}
 }
+
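+// newLoggerProvider builds an OpenTelemetry log provider that batches records to a
+// stdout exporter; main registers it as the global logger provider.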
+func newLoggerProvider() (*otellog.LoggerProvider, error) {
+	logExporter, err := stdoutlog.New()
+	if err != nil {
+		return nil, err
+	}
+
+	loggerProvider := otellog.NewLoggerProvider(
+		otellog.WithProcessor(otellog.NewBatchProcessor(logExporter)),
+	)
+	return loggerProvider, nil
+}
diff --git a/cmd/ghalistener/metrics/metrics.go b/cmd/ghalistener/metrics/metrics.go
index 2940dd2f49..fce6d040e3 100644
--- a/cmd/ghalistener/metrics/metrics.go
+++ b/cmd/ghalistener/metrics/metrics.go
@@ -3,6 +3,7 @@ package metrics
 import (
 	"context"
 	"net/http"
+	"os"
 	"strconv"
 	"time"
 
@@ -10,6 +11,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.opentelemetry.io/otel"
 )
 
 const (
@@ -45,14 +47,25 @@ var (
 		labelKeyJobName,
 		labelKeyJobWorkflowRef,
 		labelKeyEventName,
+		labelKeyRunnerID,
+		labelKeyRunnerName,
 	}
 
-	completedJobsTotalLabels   = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
-	jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
-	startedJobsTotalLabels     = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
-	jobStartupDurationLabels   = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
+	includeRunnerScaleSetNameInJobLabels = os.Getenv("INCLUDE_RUNNER_SCALE_SET_NAME_IN_JOB_LABELS") == "true"
+
+	// Initializer expressions (rather than init()) so these resolve before the metric vectors below are built.
+	jobMetricLabels    = buildJobMetricLabels()
+	completedJobLabels = append(append([]string{}, jobMetricLabels...), labelKeyJobResult)
 )
 
+func buildJobMetricLabels() []string {
+	labels := append([]string{}, jobLabels...)
+	if includeRunnerScaleSetNameInJobLabels {
+		labels = append(labels, labelKeyRunnerScaleSetName)
+	}
+	return labels
+}
+
 var (
 	assignedJobs = prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{
@@ -132,7 +145,7 @@ var (
 			Name:      "started_jobs_total",
 			Help:      "Total number of jobs started.",
 		},
-		startedJobsTotalLabels,
+		jobMetricLabels,
 	)
 
 	completedJobsTotal = prometheus.NewCounterVec(
@@ -141,7 +154,7 @@ var (
 			Help:      "Total number of jobs completed.",
 			Subsystem: githubScaleSetSubsystem,
 		},
-		completedJobsTotalLabels,
+		completedJobLabels,
 	)
 
 	jobStartupDurationSeconds = prometheus.NewHistogramVec(
@@ -151,7 +164,7 @@ var (
 			Help:      "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
 			Buckets:   runtimeBuckets,
 		},
-		jobStartupDurationLabels,
+		jobMetricLabels,
 	)
 
 	jobExecutionDurationSeconds = prometheus.NewHistogramVec(
@@ -161,7 +174,7 @@ var (
 			Help:      "Time spent executing workflow jobs by the scale set (in seconds).",
 			Buckets:   runtimeBuckets,
 		},
-		jobExecutionDurationLabels,
+		completedJobLabels,
 	)
 )
 
@@ -222,7 +235,7 @@ type baseLabels struct {
 }
 
 func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
-	return prometheus.Labels{
+	l := prometheus.Labels{
 		labelKeyEnterprise:     b.enterprise,
 		labelKeyOrganization:   jobBase.OwnerName,
 		labelKeyRepository:     jobBase.RepositoryName,
@@ -230,6 +243,10 @@ func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Label
 		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
 		labelKeyEventName:      jobBase.EventName,
 	}
+	if includeRunnerScaleSetNameInJobLabels {
+		l[labelKeyRunnerScaleSetName] = b.scaleSetName
+	}
+	return l
 }
 
 func (b *baseLabels) scaleSetLabels() prometheus.Labels {
@@ -336,6 +353,9 @@ func NewExporter(config ExporterConfig) ServerPublisher {
 }
 
 func (e *exporter) ListenAndServe(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "exporter.ListenAndServe")
+	defer span.End()
+
 	e.logger.Info("starting metrics server", "addr", e.srv.Addr)
 	go func() {
 		<-ctx.Done()
diff --git a/cmd/ghalistener/metrics/mocks/server_publisher.go b/cmd/ghalistener/metrics/mocks/server_publisher.go
index 01aac02edc..24fd4ee5e8 100644
--- a/cmd/ghalistener/metrics/mocks/server_publisher.go
+++ b/cmd/ghalistener/metrics/mocks/server_publisher.go
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 )
@@ -17,6 +18,9 @@ type ServerPublisher struct {
 
 // ListenAndServe provides a mock function with given fields: ctx
 func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "ServerPublisher.ListenAndServe")
+	defer span.End()
+
 	ret := _m.Called(ctx)
 
 	var r0 error
diff --git a/cmd/ghalistener/worker/worker.go b/cmd/ghalistener/worker/worker.go
index 9d6266bf92..93ba6281dd 100644
--- a/cmd/ghalistener/worker/worker.go
+++ b/cmd/ghalistener/worker/worker.go
@@ -11,6 +11,8 @@ import (
 	"github.com/actions/actions-runner-controller/logging"
 	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
@@ -96,6 +98,16 @@ func (w *Worker) applyDefaults() error {
 // about the ephemeral runner that should not be deleted when scaling down.
 // It returns an error if there is any issue with updating the job information.
 func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
+	defer span.End()
+
+	span.SetAttributes(
+		attribute.String("runner.name", jobInfo.RunnerName),
+		attribute.String("runner.repo.name", jobInfo.RepositoryName),
+		attribute.String("workflow.ref", jobInfo.JobWorkflowRef),
+		attribute.Int64("workflow.run.id", jobInfo.WorkflowRunId),
+	)
+
 	w.logger.Info("Updating job info for the runner",
 		"runnerName", jobInfo.RunnerName,
 		"ownerName", jobInfo.OwnerName,
@@ -164,6 +176,9 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
 // Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
 // If any error occurs during the process, it returns an error with a descriptive message.
 func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
+	defer span.End()
+
 	patchID := w.setDesiredWorkerState(count, jobsCompleted)
 
 	original, err := json.Marshal(
diff --git a/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go b/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go
index 20d828ac93..2023f6f220 100644
--- a/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go
+++ b/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go
@@ -8,6 +8,7 @@ import (
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
 	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
@@ -38,6 +39,9 @@ func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, er
 }
 
 func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.ScaleEphemeralRunnerSet")
+	defer span.End()
+
 	original := &v1alpha1.EphemeralRunnerSet{
 		Spec: v1alpha1.EphemeralRunnerSetSpec{
 			Replicas: -1,
@@ -83,6 +87,9 @@ func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Contex
 }
 
 func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
+	defer span.End()
+
 	original := &v1alpha1.EphemeralRunner{}
 	originalJson, err := json.Marshal(original)
 	if err != nil {
diff --git a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go
index 26c5072d6b..e7b1ac6b71 100644
--- a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go
+++ b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go
@@ -13,6 +13,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel"
 )
 
 const (
@@ -38,6 +39,9 @@ func NewAutoScalerClient(
 	runnerScaleSetId int,
 	options ...func(*AutoScalerClient),
 ) (*AutoScalerClient, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "NewAutoScalerClient")
+	defer span.End()
+
 	listener := AutoScalerClient{
 		logger: logger.WithName("auto_scaler"),
 	}
@@ -59,6 +63,9 @@ func NewAutoScalerClient(
 }
 
 func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "createSession")
+	defer span.End()
+
 	hostName, err := os.Hostname()
 	if err != nil {
 		hostName = uuid.New().String()
@@ -130,6 +137,9 @@ func (m *AutoScalerClient) Close() error {
 }
 
 func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.GetRunnerScaleSetMessage")
+	defer span.End()
+
 	if m.initialMessage != nil {
 		err := handler(m.initialMessage)
 		if err != nil {
@@ -162,6 +172,9 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler
 }
 
 func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.deleteMessage")
+	defer span.End()
+
 	err := m.client.DeleteMessage(ctx, messageId)
 	if err != nil {
 		return fmt.Errorf("delete message failed from refreshing client. %w", err)
@@ -172,6 +185,9 @@ func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) e
 }
 
 func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.AcquireJobsForRunnerScaleSet")
+	defer span.End()
+
 	m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
 	if len(requestIds) == 0 {
 		return nil
diff --git a/cmd/githubrunnerscalesetlistener/autoScalerService.go b/cmd/githubrunnerscalesetlistener/autoScalerService.go
index c3097212d0..c1a4df5d94 100644
--- a/cmd/githubrunnerscalesetlistener/autoScalerService.go
+++ b/cmd/githubrunnerscalesetlistener/autoScalerService.go
@@ -10,6 +10,7 @@ import (
 	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 )
 
 type ScaleSettings struct {
@@ -60,6 +61,9 @@ func NewService(
 	settings *ScaleSettings,
 	options ...func(*Service),
 ) (*Service, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "NewService")
+	defer span.End()
+
 	s := &Service{
 		ctx:                ctx,
 		rsClient:           rsClient,
diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go
index ebe7fd577e..fc461fb201 100644
--- a/cmd/githubrunnerscalesetlistener/main.go
+++ b/cmd/githubrunnerscalesetlistener/main.go
@@ -34,6 +34,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.opentelemetry.io/otel"
 	"golang.org/x/net/http/httpproxy"
 	"golang.org/x/sync/errgroup"
 )
@@ -155,6 +156,9 @@ type runOptions struct {
 }
 
 func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "run")
+	defer span.End()
+
 	// Create root context and hook with sigint and sigterm
 	creds := &actions.ActionsAuth{}
 	if rc.Token != "" {
diff --git a/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go b/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go
index 8c44598cb3..11fe41af72 100644
--- a/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go
+++ b/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	mock "github.com/stretchr/testify/mock"
+	"go.opentelemetry.io/otel"
 )
 
 // MockKubernetesManager is an autogenerated mock type for the KubernetesManager type
@@ -15,6 +16,9 @@ type MockKubernetesManager struct {
 
 // ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount
 func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.ScaleEphemeralRunnerSet")
+	defer span.End()
+
 	ret := _m.Called(ctx, namespace, resourceName, runnerCount)
 
 	var r0 error
@@ -29,6 +33,9 @@ func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, na
 
 // UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId
 func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
+	defer span.End()
+
 	ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)
 
 	var r0 error
diff --git a/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go b/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go
index a6f6a5d15f..d7ff1baf36 100644
--- a/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go
+++ b/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 )
@@ -17,6 +18,9 @@ type MockRunnerScaleSetClient struct {
 
 // AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds
 func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.AcquireJobsForRunnerScaleSet")
+	defer span.End()
+
 	ret := _m.Called(ctx, requestIds)
 
 	var r0 error
@@ -31,6 +35,9 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con
 
 // GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
 func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.GetRunnerScaleSetMessage")
+	defer span.End()
+
 	ret := _m.Called(ctx, handler, maxCapacity)
 
 	var r0 error
diff --git a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go
index f3262c1597..664d28f74b 100644
--- a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go
+++ b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go
@@ -8,6 +8,7 @@ import (
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel"
 )
 
 type SessionRefreshingClient struct {
@@ -25,6 +26,9 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio
 }
 
 func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.GetMessage")
+	defer span.End()
+
 	if maxCapacity < 0 {
 		return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
 	}
@@ -55,6 +59,9 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId
 }
 
 func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.DeleteMessage")
+	defer span.End()
+
 	err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
 	if err == nil {
 		return nil
@@ -82,6 +89,9 @@ func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId i
 }
 
 func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.AcquireJobs")
+	defer span.End()
+
 	ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
 	if err == nil {
 		return ids, nil
diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go
index f35c85e969..9d24612eb2 100644
--- a/controllers/actions.github.com/autoscalinglistener_controller.go
+++ b/controllers/actions.github.com/autoscalinglistener_controller.go
@@ -21,6 +21,8 @@ import (
 	"fmt"
 
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
+	otelCodes "go.opentelemetry.io/otel/codes"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
@@ -70,6 +72,9 @@ type AutoscalingListenerReconciler struct {
 
 // Reconcile a AutoscalingListener resource to meet its desired spec.
 func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.Reconcile")
+	defer span.End()
+
 	log := r.Log.WithValues("autoscalinglistener", req.NamespacedName)
 
 	autoscalingListener := new(v1alpha1.AutoscalingListener)
@@ -266,6 +271,15 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 }
 
 func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.cleanupResources")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	logger.Info("Cleaning up the listener pod")
 	listenerPod := new(corev1.Pod)
 	err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
@@ -373,6 +387,9 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 }
 
 func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createServiceAccountForListener")
+	defer span.End()
+
 	newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
 
 	if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
@@ -390,6 +407,9 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
 }
 
 func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createListenerPod")
+	defer span.End()
+
 	var envs []corev1.EnvVar
 	if autoscalingListener.Spec.Proxy != nil {
 		httpURL := corev1.EnvVar{
@@ -499,6 +519,9 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
 }
 
 func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.certificate")
+	defer span.End()
+
 	if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
 		return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
 	}
@@ -537,6 +560,9 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
 }
 
 func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createSecretsForListener")
+	defer span.End()
+
 	newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
 
 	if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
@@ -554,6 +580,9 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
 }
 
 func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createProxySecret")
+	defer span.End()
+
 	data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
 		var secret corev1.Secret
 		err := r.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, &secret)
@@ -593,6 +622,9 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
 }
 
 func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateSecretsForListener")
+	defer span.End()
+
 	dataHash := hash.ComputeTemplateHash(secret.Data)
 	updatedMirrorSecret := mirrorSecret.DeepCopy()
 	updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
@@ -609,6 +641,9 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
 }
 
 func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleForListener")
+	defer span.End()
+
 	newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
 
 	logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
@@ -622,6 +657,9 @@ func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Contex
 }
 
 func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateRoleForListener")
+	defer span.End()
+
 	updatedPatchRole := listenerRole.DeepCopy()
 	updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash
 	updatedPatchRole.Rules = desiredRules
@@ -637,6 +675,9 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
 }
 
 func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleBindingForListener")
+	defer span.End()
+
 	newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
 
 	logger.Info("Creating listener role binding",
diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go
index f87a11af19..860a6685db 100644
--- a/controllers/actions.github.com/autoscalingrunnerset_controller.go
+++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go
@@ -27,6 +27,8 @@ import (
 	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
+	otelCodes "go.opentelemetry.io/otel/codes"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -93,6 +95,9 @@ type AutoscalingRunnerSetReconciler struct {
 
 // Reconcile a AutoscalingRunnerSet resource to meet its desired spec.
 func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.Reconcile")
+	defer span.End()
+
 	log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName)
 
 	autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet)
@@ -334,6 +339,15 @@ func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1a
 }
 
 func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupListener")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	logger.Info("Cleaning up the listener")
 	var listener v1alpha1.AutoscalingListener
 	err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
@@ -355,6 +369,15 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
 }
 
 func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupEphemeralRunnerSets")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	logger.Info("Cleaning up ephemeral runner sets")
 	runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
 	if err != nil {
@@ -373,6 +396,9 @@ func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.
 }
 
 func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.Context, oldRunnerSets []v1alpha1.EphemeralRunnerSet, logger logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteEphemeralRunnerSets")
+	defer span.End()
+
 	for i := range oldRunnerSets {
 		rs := &oldRunnerSets[i]
 		// already deleted but contains finalizer so it still exists
@@ -390,6 +416,15 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
 }
 
 func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.removeFinalizersFromDependentResources")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	c := autoscalingRunnerSetFinalizerDependencyCleaner{
 		client:               r.Client,
 		autoscalingRunnerSet: autoscalingRunnerSet,
@@ -413,6 +448,9 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
 }
 
 func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createRunnerScaleSet")
+	defer span.End()
+
 	logger.Info("Creating a new runner scale set")
 	actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
 	if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
@@ -504,6 +542,9 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
 }
 
 func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetRunnerGroup")
+	defer span.End()
+
 	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		logger.Error(err, "Failed to parse runner scale set ID")
@@ -547,6 +588,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
 }
 
 func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetName")
+	defer span.End()
+
 	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		logger.Error(err, "Failed to parse runner scale set ID")
@@ -583,6 +627,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
 }
 
 func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteRunnerScaleSet")
+	defer span.End()
+
 	scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
 	if !ok {
 		// Annotation not being present can occur in 3 scenarios
@@ -634,6 +681,9 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
 }
 
 func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createEphemeralRunnerSet")
+	defer span.End()
+
 	desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
 	if err != nil {
 		log.Error(err, "Could not create EphemeralRunnerSet")
@@ -656,6 +706,9 @@ func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Co
 }
 
 func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createAutoScalingListenerForRunnerSet")
+	defer span.End()
+
 	var imagePullSecrets []corev1.LocalObjectReference
 	for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets {
 		imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{
@@ -680,6 +733,9 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
 }
 
 func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.listEphemeralRunnerSets")
+	defer span.End()
+
 	list := new(v1alpha1.EphemeralRunnerSetList)
 	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
 		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
@@ -689,6 +745,9 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
 }
 
 func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientFor")
+	defer span.End()
+
 	var configSecret corev1.Secret
 	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
 		return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
@@ -709,6 +768,9 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a
 }
 
 func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientOptionsFor")
+	defer span.End()
+
 	var options []actions.ClientOption
 
 	if autoscalingRunnerSet.Spec.Proxy != nil {
@@ -794,6 +856,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool,
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleBindingFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -838,6 +903,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -881,6 +949,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeServiceAccountFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -925,6 +996,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeNoPermissionServiceAccountFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -969,6 +1043,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeGitHubSecretFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -1013,6 +1090,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleBindingFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -1057,6 +1137,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
diff --git a/controllers/actions.github.com/clientutil.go b/controllers/actions.github.com/clientutil.go
index 876d8dfd43..70d1124bad 100644
--- a/controllers/actions.github.com/clientutil.go
+++ b/controllers/actions.github.com/clientutil.go
@@ -3,6 +3,7 @@ package actionsgithubcom
 import (
 	"context"
 
+	"go.opentelemetry.io/otel"
 	kclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -16,6 +17,9 @@ type patcher interface {
 }
 
 func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(obj T)) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "patch")
+	defer span.End()
+
 	original := obj.DeepCopy()
 	update(obj)
 	return client.Patch(ctx, obj, kclient.MergeFrom(original))
@@ -26,6 +30,9 @@ type subResourcePatcher interface {
 }
 
 func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "patchSubResource")
+	defer span.End()
+
 	original := obj.DeepCopy()
 	update(obj)
 	return client.Patch(ctx, obj, kclient.MergeFrom(original))
diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go
index 6da084b75d..34c17f5f92 100644
--- a/controllers/actions.github.com/ephemeralrunner_controller.go
+++ b/controllers/actions.github.com/ephemeralrunner_controller.go
@@ -26,6 +26,8 @@ import (
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
+	otelCodes "go.opentelemetry.io/otel/codes"
 	"go.uber.org/multierr"
 	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -68,6 +70,9 @@ type EphemeralRunnerReconciler struct {
 // For more details, check Reconcile and its Result here:
 // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile
 func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.Reconcile")
+	defer span.End()
+
 	log := r.Log.WithValues("ephemeralrunner", req.NamespacedName)
 
 	ephemeralRunner := new(v1alpha1.EphemeralRunner)
@@ -177,7 +182,9 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 
 	if ephemeralRunner.Status.RunnerId == 0 {
 		log.Info("Creating new ephemeral runner registration and updating status with runner config")
-		return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
+		if res, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); res != nil {
+			return *res, err
+		}
 	}
 
 	secret := new(corev1.Secret)
@@ -188,7 +195,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		}
 		// create secret if not created
 		log.Info("Creating new ephemeral runner secret for jitconfig.")
-		return r.createSecret(ctx, ephemeralRunner, log)
+		if res, err := r.createSecret(ctx, ephemeralRunner, log); res != nil {
+			return *res, err
+		}
+
+		// Retry to get the secret that was just created.
+		// Otherwise, even though we want to continue to create the pod,
+		// it fails due to the missing secret resulting in an invalid pod spec.
+		if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
+			log.Error(err, "Failed to fetch secret")
+			return ctrl.Result{}, err
+		}
 	}
 
 	pod := new(corev1.Pod)
@@ -294,6 +311,9 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 }
 
 func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerFromService")
+	defer span.End()
+
 	if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
 		actionsError := &actions.ActionsError{}
 		if !errors.As(err, &actionsError) {
@@ -323,6 +343,15 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context
 }
 
 func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupResources")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	log.Info("Cleaning up the runner pod")
 	pod := new(corev1.Pod)
 	err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
@@ -361,6 +390,15 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 }
 
 func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupContainerHooksResources")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	log.Info("Cleaning up runner linked pods")
 	done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log)
 	if err != nil {
@@ -381,6 +419,15 @@ func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.C
 }
 
 func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedPods")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	runnerLinedLabels := client.MatchingLabels(
 		map[string]string{
 			"runner-pod": ephemeralRunner.Name,
@@ -416,6 +463,15 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
 }
 
 func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedSecrets")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	runnerLinkedLabels := client.MatchingLabels(
 		map[string]string{
 			"runner-pod": ephemeralRunner.ObjectMeta.Name,
@@ -451,6 +507,9 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
 }
 
 func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFailed")
+	defer span.End()
+
 	log.Info("Updating ephemeral runner status to Failed")
 	if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
 		obj.Status.Phase = corev1.PodFailed
@@ -470,6 +529,9 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
 }
 
 func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFinished")
+	defer span.End()
+
 	log.Info("Updating ephemeral runner status to Finished")
 	if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
 		obj.Status.Phase = corev1.PodSucceeded
@@ -484,6 +546,9 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
 // deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
 // It should not be responsible for setting the status to Failed.
 func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deletePodAsFailed")
+	defer span.End()
+
 	if pod.ObjectMeta.DeletionTimestamp.IsZero() {
 		log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
 		if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
@@ -510,12 +575,15 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
 
 // updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
 // This method should always set .status.runnerId and .status.runnerJITConfig
-func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
+func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateStatusWithRunnerConfig")
+	defer span.End()
+
 	// Runner is not registered with the service. We need to register it first
 	log.Info("Creating ephemeral runner JIT config")
 	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
 	if err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
 	}
 
 	jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
@@ -525,12 +593,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 	if err != nil {
 		actionsError := &actions.ActionsError{}
 		if !errors.As(err, &actionsError) {
-			return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
 		}
 
 		if actionsError.StatusCode != http.StatusConflict ||
 			!actionsError.IsException("AgentExistsException") {
-			return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
 		}
 
 		// If the runner with the name we want already exists it means:
@@ -543,12 +611,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 		log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
 		existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
 		if err != nil {
-			return ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
 		}
 
 		if existingRunner == nil {
 			log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
-			return ctrl.Result{Requeue: true}, nil
+			return &ctrl.Result{Requeue: true}, nil
 		}
 
 		log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
@@ -556,16 +624,16 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 			log.Info("Removing the runner with the same name")
 			err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
 			if err != nil {
-				return ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
+				return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
 			}
 
 			log.Info("Removed the runner with the same name, re-queuing the reconciliation")
-			return ctrl.Result{Requeue: true}, nil
+			return &ctrl.Result{Requeue: true}, nil
 		}
 
 		// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
 		// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
-		return ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
 	}
 	log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
 
@@ -576,14 +644,26 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 		obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
 	})
 	if err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
 	}
 
+	// We want to continue without a requeue for faster pod creation.
+	//
+	// To do so, we update the status in-place, so that both continuing the loop and
+	// requeuing (which skips updateStatusWithRunnerConfig in the next loop) will
+	// have the same effect.
+	ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
+	ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
+	ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
+
 	log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
-	return ctrl.Result{}, nil
+	return nil, nil
 }
 
 func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createPod")
+	defer span.End()
+
 	var envs []corev1.EnvVar
 	if runner.Spec.ProxySecretRef != "" {
 		http := corev1.EnvVar{
@@ -656,21 +736,24 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
 	return ctrl.Result{}, nil
 }
 
-func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
+func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createSecret")
+	defer span.End()
+
 	log.Info("Creating new secret for ephemeral runner")
 	jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)
 
 	if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
 	}
 
 	log.Info("Created new secret spec for ephemeral runner")
 	if err := r.Create(ctx, jitSecret); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
 	}
 
 	log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
-	return ctrl.Result{Requeue: true}, nil
+	return nil, nil
 }
 
 // updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -679,6 +762,9 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
 // The event should not be re-queued since the termination status should be set
 // before proceeding with reconciliation logic
 func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateRunStatusFromPod")
+	defer span.End()
+
 	if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
 		return nil
 	}
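
The change of updateStatusWithRunnerConfig and createSecret from ctrl.Result to *ctrl.Result gives callers a three-way signal: a non-nil error aborts the pass, a non-nil result is returned as-is (for example a requeue), and (nil, nil) means "keep going in this reconcile pass" because the state was already updated in place. A minimal sketch of how a caller might consume that convention (the helper and its wiring are illustrative, not the actual Reconcile code):

    package main

    import (
    	"fmt"

    	ctrl "sigs.k8s.io/controller-runtime"
    )

    // step stands in for a helper such as updateStatusWithRunnerConfig:
    // (nil, nil) means "continue in the same pass", a non-nil *ctrl.Result
    // means "stop and return this result", and an error aborts the pass.
    func step(needsRequeue bool) (*ctrl.Result, error) {
    	if needsRequeue {
    		return &ctrl.Result{Requeue: true}, nil
    	}
    	return nil, nil
    }

    func reconcileOnce() (ctrl.Result, error) {
    	res, err := step(false)
    	switch {
    	case err != nil:
    		return ctrl.Result{}, err
    	case res != nil:
    		return *res, nil
    	}
    	// res == nil: state was updated in place, so fall through to the next
    	// step (e.g. pod creation) without a requeue.
    	return ctrl.Result{}, nil
    }

    func main() {
    	fmt.Println(reconcileOnce())
    }
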
@@ -702,6 +788,9 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
 }
 
 func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientFor")
+	defer span.End()
+
 	secret := new(corev1.Secret)
 	if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
 		return nil, fmt.Errorf("failed to get secret: %w", err)
@@ -722,6 +811,9 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner
 }
 
 func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientOptionsFor")
+	defer span.End()
+
 	var opts []actions.ClientOption
 	if runner.Spec.Proxy != nil {
 		proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
@@ -771,6 +863,15 @@ func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context,
 // runnerRegisteredWithService checks if the runner is still registered with the service
 // Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
 func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.runnerRegisteredWithService")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	actionsClient, err := r.actionsClientFor(ctx, runner)
 	if err != nil {
 		return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
@@ -798,6 +899,9 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
 }
 
 func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deleteRunnerFromService")
+	defer span.End()
+
 	client, err := r.actionsClientFor(ctx, ephemeralRunner)
 	if err != nil {
 		return fmt.Errorf("failed to get actions client for runner: %v", err)
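
All of the instrumentation added to this file follows the same generated shape: start a span named after the method, end it on return, and, where the function has a named error return, record the final error in a second deferred closure. A minimal, self-contained sketch of that shape (package and function names are illustrative, not part of the codebase):

    package main

    import (
    	"context"
    	"errors"
    	"fmt"

    	"go.opentelemetry.io/otel"
    	otelCodes "go.opentelemetry.io/otel/codes"
    )

    // doWork mirrors the injected pattern; the deferred closure observes the
    // final value of err only because the return value is named.
    func doWork(ctx context.Context) (err error) {
    	ctx, span := otel.Tracer("arc").Start(ctx, "doWork")
    	defer span.End()
    	defer func() {
    		if err != nil {
    			span.SetStatus(otelCodes.Error, "error")
    			span.RecordError(err)
    		}
    	}()

    	if ctx.Err() != nil {
    		return ctx.Err()
    	}
    	return errors.New("simulated failure")
    }

    func main() {
    	fmt.Println(doWork(context.Background()))
    }

Without a registered TracerProvider the global tracer is a no-op, so this compiles and runs but records nothing.
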
diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go
index c5d166a50a..4ae43f1260 100644
--- a/controllers/actions.github.com/ephemeralrunnerset_controller.go
+++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go
@@ -28,6 +28,7 @@ import (
 	"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 	"go.uber.org/multierr"
 	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -75,6 +76,9 @@ type EphemeralRunnerSetReconciler struct {
 // be to bring the count of EphemeralRunners to the desired one, not to patch this resource
 // until it is safe to do so
 func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.Reconcile")
+	defer span.End()
+
 	log := r.Log.WithValues("ephemeralrunnerset", req.NamespacedName)
 
 	ephemeralRunnerSet := new(v1alpha1.EphemeralRunnerSet)
@@ -250,6 +254,9 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
 }
 
 func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanupFinishedEphemeralRunners")
+	defer span.End()
+
 	// cleanup finished runners and proceed
 	var errs []error
 	for i := range finishedEphemeralRunners {
@@ -265,6 +272,9 @@ func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx conte
 }
 
 func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpProxySecret")
+	defer span.End()
+
 	if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
 		return nil
 	}
@@ -284,6 +294,9 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
 }
 
 func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpEphemeralRunners")
+	defer span.End()
+
 	ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
 	err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
 	if err != nil {
@@ -357,6 +370,9 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
 
 // createEphemeralRunners provisions `count` number of v1alpha1.EphemeralRunner resources in the cluster.
 func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createEphemeralRunners")
+	defer span.End()
+
 	// Track multiple errors at once and return the bundle.
 	errs := make([]error, 0)
 	for i := 0; i < count; i++ {
@@ -386,6 +402,9 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
 }
 
 func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createProxySecret")
+	defer span.End()
+
 	proxySecretData, err := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
 		secret := new(corev1.Secret)
 		err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunnerSet.Namespace, Name: s}, secret)
@@ -431,6 +450,9 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
 // When this happens, the next reconcile loop will try to delete the remaining ephemeral runners
 // after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates.
 func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteIdleEphemeralRunners")
+	defer span.End()
+
 	if count <= 0 {
 		return nil
 	}
@@ -477,6 +499,9 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
 }
 
 func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteEphemeralRunnerWithActionsClient")
+	defer span.End()
+
 	if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
 		actionsError := &actions.ActionsError{}
 		if !errors.As(err, &actionsError) {
@@ -503,6 +528,9 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
 }
 
 func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientFor")
+	defer span.End()
+
 	secret := new(corev1.Secret)
 	if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
 		return nil, fmt.Errorf("failed to get secret: %w", err)
@@ -523,6 +551,9 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs
 }
 
 func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientOptionsFor")
+	defer span.End()
+
 	var opts []actions.ClientOption
 	if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
 		proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
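
The same pattern is applied throughout this controller, and because each helper starts its span from the ctx it receives (Reconcile's span is carried inside that ctx), the helper spans nest under the Reconcile span without any extra plumbing. A small sketch of that parent/child relationship (names are illustrative):

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    )

    func reconcile(ctx context.Context) {
    	// The started span is stored in the returned ctx, so it becomes the
    	// parent of any span started from that ctx further down the call chain.
    	ctx, span := otel.Tracer("arc").Start(ctx, "Reconcile")
    	defer span.End()

    	helper(ctx)
    }

    func helper(ctx context.Context) {
    	// Child of "Reconcile", because it is started from the same ctx.
    	_, span := otel.Tracer("arc").Start(ctx, "helper")
    	defer span.End()
    }

    func main() {
    	reconcile(context.Background())
    }
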
diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go
index 49bdcac0f2..6274161a11 100644
--- a/controllers/actions.github.com/resourcebuilder.go
+++ b/controllers/actions.github.com/resourcebuilder.go
@@ -15,6 +15,7 @@ import (
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/hash"
 	"github.com/actions/actions-runner-controller/logging"
+	"go.opentelemetry.io/otel"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -573,6 +574,9 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
 }
 
 func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
+	ctx, span := otel.Tracer("arc").Start(ctx, "resourceBuilder.newEphemeralRunnerPod")
+	defer span.End()
+
 	var newPod corev1.Pod
 
 	labels := map[string]string{}
diff --git a/go.mod b/go.mod
index 74a42a3d37..3d8bdebc99 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22.1
 
 require (
 	github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
-	github.com/davecgh/go-spew v1.1.1
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/evanphx/json-patch v5.9.0+incompatible
 	github.com/go-logr/logr v1.4.1
 	github.com/golang-jwt/jwt/v4 v4.5.0
@@ -23,12 +23,17 @@ require (
 	github.com/prometheus/client_golang v1.17.0
 	github.com/stretchr/testify v1.9.0
 	github.com/teambition/rrule-go v1.8.2
+	go.opentelemetry.io/otel v1.27.0
+	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0
+	go.opentelemetry.io/otel/log v0.3.0
+	go.opentelemetry.io/otel/sdk/log v0.3.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/net v0.24.0
 	golang.org/x/oauth2 v0.19.0
 	golang.org/x/sync v0.7.0
 	gomodules.xyz/jsonpatch/v2 v2.4.0
+	gopkg.in/DataDog/dd-trace-go.v1 v1.65.1
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.28.4
 	k8s.io/apimachinery v0.28.4
@@ -38,23 +43,34 @@ require (
 )
 
 require (
+	github.com/DataDog/appsec-internal-go v1.6.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 // indirect
+	github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
+	github.com/DataDog/go-libddwaf/v3 v3.2.1 // indirect
+	github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
+	github.com/DataDog/sketches-go v1.4.5 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
-	github.com/aws/aws-sdk-go v1.44.122 // indirect
+	github.com/aws/aws-sdk-go v1.44.327 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/boombuler/barcode v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cloudflare/circl v1.3.7 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/ebitengine/purego v0.6.0-alpha.5 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch/v5 v5.7.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
 	github.com/go-openapi/jsonpointer v0.20.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.4 // indirect
-	github.com/go-sql-driver/mysql v1.4.1 // indirect
+	github.com/go-sql-driver/mysql v1.6.0 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -65,9 +81,9 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a // indirect
 	github.com/gruntwork-io/go-commons v0.8.0 // indirect
-	github.com/hashicorp/errwrap v1.0.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-multierror v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -80,23 +96,32 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/outcaste-io/ristretto v0.2.3 // indirect
+	github.com/philhofer/fwd v1.1.2 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/pquerna/otp v1.2.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.45.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/tinylib/msgp v1.1.8 // indirect
 	github.com/urfave/cli v1.22.2 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.27.0 // indirect
+	go.opentelemetry.io/otel/trace v1.27.0 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
 	golang.org/x/crypto v0.22.0 // indirect
 	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-	golang.org/x/sys v0.19.0 // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/sys v0.20.0 // indirect
 	golang.org/x/term v0.19.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.4.0 // indirect
 	golang.org/x/tools v0.17.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
+	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
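
The new direct dependencies (otel, the stdout log exporter, and dd-trace-go) cover emitting the spans added above, but this diff does not show where the global TracerProvider gets registered; that wiring presumably lives in the cherry-picked commit. One plausible setup, assuming dd-trace-go's OpenTelemetry bridge is what exports the spans (sketch only; the import alias and placement are assumptions):

    package main

    import (
    	"go.opentelemetry.io/otel"

    	ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry"
    )

    func main() {
    	// Register the Datadog-backed provider globally so that spans created
    	// via otel.Tracer("arc") are exported to the Datadog agent.
    	tp := ddotel.NewTracerProvider()
    	defer func() { _ = tp.Shutdown() }()
    	otel.SetTracerProvider(tp)

    	// ... start the controller manager here ...
    }
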
diff --git a/go.sum b/go.sum
index f1dd2cc993..3a3bc62837 100644
--- a/go.sum
+++ b/go.sum
@@ -1,12 +1,31 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/DataDog/appsec-internal-go v1.6.0 h1:QHvPOv/O0s2fSI/BraZJNpRDAtdlrRm5APJFZNBxjAw=
+github.com/DataDog/appsec-internal-go v1.6.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
+github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
+github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
+github.com/DataDog/go-libddwaf/v3 v3.2.1 h1:lZPc6UxCOwioHc++nsldKR50FpIrRh1uGnGLuryqnE8=
+github.com/DataDog/go-libddwaf/v3 v3.2.1/go.mod h1:AP+7Atb8ftSsrha35wht7+K3R+xuzfVSQhabSO4w6CY=
+github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
+github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
+github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
+github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
+github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
+github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE=
 github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
 github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
 github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
-github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY=
+github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@@ -16,6 +35,7 @@ github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
 github.com/bradleyfalzon/ghinstallation/v2 v2.8.0 h1:yUmoVv70H3J4UOqxqsee39+KlXxNEDfTbAp8c/qULKk=
 github.com/bradleyfalzon/ghinstallation/v2 v2.8.0/go.mod h1:fmPmvCiBWhJla3zDv9ZTQSZc8AbwyRnGW1yg5ep1Pcs=
 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
@@ -26,8 +46,19 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
+github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
+github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY=
+github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
@@ -35,6 +66,10 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
 github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -44,9 +79,12 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=
 github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -57,8 +95,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
 github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@@ -68,6 +106,7 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -76,7 +115,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
@@ -109,16 +147,24 @@ github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRa
 github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=
 github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVfxEolfZeU=
 github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
 github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -145,8 +191,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
 github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=
 github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
@@ -154,6 +204,8 @@ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvls
 github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -175,10 +227,17 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
 github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
+github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
+github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
+github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok=
 github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
 github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
@@ -189,13 +248,22 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne
 github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
+github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -209,6 +277,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -216,11 +285,33 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8=
 github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
+github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
+github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
 github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0 h1:6aGq6rMOdOx9B385JpF1OpeL18+6Ho8bTFdxy10oEGY=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0/go.mod h1:fdZI+pB2Y6Dpl3Uf+1ZPrkX6cnwsUAhjK1f9yCAlJIM=
+go.opentelemetry.io/otel/log v0.3.0 h1:kJRFkpUFYtny37NQzL386WbznUByZx186DpEMKhEGZs=
+go.opentelemetry.io/otel/log v0.3.0/go.mod h1:ziCwqZr9soYDwGNbIL+6kAvQC+ANvjgG367HVcyR/ys=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/sdk/log v0.3.0 h1:GEjJ8iftz2l+XO1GF2856r7yYVh74URiF9JMcAacr5U=
+go.opentelemetry.io/otel/sdk/log v0.3.0/go.mod h1:BwCxtmux6ACLuys1wlbc0+vGBd+xytjmjajwqqIul2g=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -239,8 +330,12 @@ golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8
 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -248,9 +343,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
@@ -261,6 +358,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
@@ -278,19 +376,25 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
 golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
@@ -298,8 +402,8 @@ golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
@@ -311,7 +415,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
 golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
@@ -319,10 +425,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
 gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -333,6 +439,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
 google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/DataDog/dd-trace-go.v1 v1.65.1 h1:Ne7kzWr/br/jwhUJR7CnqPl/mUpNxa6LfgZs0S4htZM=
+gopkg.in/DataDog/dd-trace-go.v1 v1.65.1/go.mod h1:beNFIWd/H04d0k96cfltgiDH2+t0T5sDbyYLF3VTXqk=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -349,6 +457,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ=
+honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc=
 k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
 k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
 k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08=
diff --git a/instrument.sh b/instrument.sh
new file mode 100755
index 0000000000..6a088588e5
--- /dev/null
+++ b/instrument.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -e
+
+git reset --hard origin/master
+
+go get go.opentelemetry.io/otel
+go get gopkg.in/DataDog/dd-trace-go.v1
+go get github.com/davecgh/go-spew
+go get github.com/pmezard/go-difflib
+
+find . -name "*.go" | grep -E '(cmd/ghalistener|cmd/githubrunnerscalesetlistener|controllers/actions.github.com)' | xargs -I{} go-instrument -app arc -w -filename {}
+
+git add -u
+git commit -m 'chore: goinstrument'
+
+git cherry-pick a6d6f3e
diff --git a/prep_fork.sh b/prep_fork.sh
new file mode 100755
index 0000000000..6c08947398
--- /dev/null
+++ b/prep_fork.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -e
+
+for f in arc-publish.yaml arc-{publish,validate}-chart.yaml \
+         arc-{release-runners,validate-runners,update-runners-scheduled}.yaml gha-{publish,validate}-chart.yaml \
+         global-run-{first-interaction,stale}.yaml; do
+    echo "Processing $f"
+    git rm .github/workflows/$f
+done
+
+git commit -m "Remove workflows unused in a forked repo"
+
+# cherry-pick: Remove legacy-canary-build job from global-publish-canary.yaml as unused in a forked repo
+git cherry-pick 842b16d71498a5f2a1fc17c0917372435464d49c