diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index f945cd04398..00000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,1387 +0,0 @@ -/** - * Jenkins build definition. This file defines the entire build pipeline. - */ -import java.time.LocalDateTime -import java.time.format.DateTimeFormatter -import jenkins.model.Jenkins - -SRC_STASH_NAME = 'src' -TARGETS_STASH_NAME = 'targets' -DEV_DOCKER_IMAGE = 'gcr.io/pixie-oss/pixie-dev-public/dev_image' -DEV_DOCKER_IMAGE_EXTRAS = 'gcr.io/pixie-oss/pixie-dev-public/dev_image_with_extras' -GCLOUD_DOCKER_IMAGE = 'google/cloud-sdk:412.0.0-alpine' -GIT_DOCKER_IMAGE = 'bitnami/git:2.33.0' - -K8S_PROD_CLUSTER = 'https://cloud-prod.internal.corp.pixielabs.ai' -// Our staging instance used to be run on our prod cluster. These creds are -// actually the creds for our prod cluster. -K8S_PROD_CREDS = 'cloud-staging' - -K8S_STAGING_CLUSTER = 'https://cloud-staging.internal.corp.pixielabs.ai' -K8S_STAGING_CREDS = 'pixie-prod-staging-cluster' - -K8S_TESTING_CLUSTER = 'https://cloud-testing.internal.corp.pixielabs.ai' -K8S_TESTING_CREDS = 'pixie-prod-testing-cluster' - -// PXL Docs variables. -PXL_DOCS_BINARY = '//src/carnot/docstring:docstring' -PXL_DOCS_FILE = 'pxl-docs.json' -PXL_DOCS_BUCKET = 'pl-docs' -PXL_DOCS_GCS_PATH = "gs://${PXL_DOCS_BUCKET}/${PXL_DOCS_FILE}" - -// BPF Setup. -// The default kernel should be the oldest supported kernel -// to ensure that we don't have BPF compatibility regressions. -BPF_DEFAULT_KERNEL = '4.14' -BPF_NEWEST_KERNEL = '5.19' -BPF_KERNELS = ['4.14', '4.19', '5.4', '5.10', '5.15', '5.19'] -BPF_KERNELS_TO_TEST = [BPF_DEFAULT_KERNEL, BPF_NEWEST_KERNEL] - -// Currently disabling TSAN on BPF builds because it runs too slow. -// In particular, the uprobe deployment takes far too long. See issue: -// https://pixie-labs.atlassian.net/browse/PL-1329 -// The benefit of TSAN on such runs is marginal anyways, because the tests -// are mostly single-threaded. 
-runBPFWithTSAN = false - -// TODO(yzhao/oazizi): PP-2276 Fix the BPF ASAN tests. -runBPFWithASAN = false - -// This variable store the dev docker image that we need to parse before running any docker steps. -devDockerImageWithTag = '' -devDockerImageExtrasWithTag = '' - -stashList = [] - -// Flag controlling if coverage job is enabled. -isMainCodeReviewRun = (env.JOB_NAME == 'pixie-oss/build-and-test-pr') -isReleaseRun = env.JOB_NAME.startsWith('pixie-release/') - -isMainRun = (env.JOB_NAME == 'pixie-main/build-and-test-all') -isNightlyTestRegressionRun = (env.JOB_NAME == 'pixie-main/nightly-test-regression') -isNightlyBPFTestRegressionRun = (env.JOB_NAME == 'pixie-main/nightly-test-regression-bpf') - -isOSSMainRun = (env.JOB_NAME == 'pixie-oss/build-and-test-all') -isOSSCloudBuildRun = env.JOB_NAME.startsWith('pixie-release/cloud/') -isOSSCodeReviewRun = env.JOB_NAME == 'pixie-oss/build-and-test-pr' -isOSSRun = isOSSMainRun || isOSSCloudBuildRun || isOSSCodeReviewRun || isReleaseRun - -isCloudProdBuildRun = env.JOB_NAME.startsWith('pixie-release/cloud-prod/') -isCloudStagingBuildRun = env.JOB_NAME.startsWith('pixie-release/cloud-staging/') -isStirlingPerfEval = (env.JOB_NAME == 'pixie-main/stirling-perf-eval') - -GCS_STASH_BUCKET = isOSSRun ? 'px-jenkins-build-oss' : 'px-jenkins-build-temp' -GCP_PROJECT = isOSSRun ? 'pixie-oss' : 'pl-dev-infra' -BES_GCE_FILE = isOSSRun ? 'ci/bes-oss-gce.bazelrc' : 'ci/bes-gce.bazelrc' -BES_K8S_FILE = isOSSRun ? 'ci/bes-oss-k8s.bazelrc' : 'ci/bes-k8s.bazelrc' - -// Build tags are used to modify the behavior of the build. -// Note: Tags only work for code-review builds. -// Enable the BPF build, even if it's not required. -buildTagBPFBuild = false -// Enable BPF build across all tested kernels. 
-buildTagBPFBuildAllKernels = false - -def gsutilCopy(String src, String dest) { - container('gcloud') { - sh "gsutil -o GSUtil:parallel_composite_upload_threshold=150M cp ${src} ${dest}" - } -} - -def bbLinks() { - return "--build_metadata=BUILDBUDDY_LINKS='[Jenkins](${BUILD_URL})'" -} - -def stashOnGCS(String name, String pattern) { - def destFile = "${name}.tar.gz" - sh "mkdir -p .archive && tar --exclude=.archive -czf .archive/${destFile} ${pattern}" - - gsutilCopy(".archive/${destFile}", "gs://${GCS_STASH_BUCKET}/${env.BUILD_TAG}/${destFile}") -} - -def fetchFromGCS(String name) { - def srcFile = "${name}.tar.gz" - sh 'mkdir -p .archive' - - gsutilCopy("gs://${GCS_STASH_BUCKET}/${env.BUILD_TAG}/${srcFile}", ".archive/${srcFile}") -} - -def unstashFromGCS(String name) { - def srcFile = "${name}.tar.gz" - fetchFromGCS(name) - // Note: The tar extraction must use `--no-same-owner`. - // Without this, the owner of some third_party files become invalid users, - // which causes some cmake projects to fail with "failed to preserve ownership" messages. - sh """ - tar -zxf .archive/${srcFile} --no-same-owner - rm -f .archive/${srcFile} - """ -} - -def shFileExists(String f) { - return sh( - script: "test -f ${f}", - returnStatus: true) == 0 -} - -def shFileEmpty(String f) { - return sh( - script: "test -s ${f}", - returnStatus: true) != 0 -} - -def createBazelStash(String stashName) { - sh ''' - rm -rf bazel-testlogs-archive - mkdir -p bazel-testlogs-archive - cp -a bazel-testlogs/ bazel-testlogs-archive || true - ''' - stashOnGCS(stashName, 'bazel-testlogs-archive/**') - stashList.add(stashName) -} - -def retryOnK8sDownscale(Closure body, int times=5) { - for (int retryCount = 0; retryCount < times; retryCount++) { - try { - body() - return - } catch (io.fabric8.kubernetes.client.KubernetesClientException e) { - println("Caught ${e}, assuming K8s cluster downscaled, will retry.") - // Sleep an extra 5 seconds for each retry attempt. 
- def interval = (retryCount + 1) * 5 - sleep interval - continue - } catch (e) { - println("Unhandled ${e}, assuming fatal error.") - throw e - } - } -} - -def fetchSourceK8s(Closure body) { - container('gcloud') { - unstashFromGCS(SRC_STASH_NAME) - sh 'git config --global --add safe.directory `pwd`' - if (isOSSCodeReviewRun || isOSSMainRun) { - sh 'cp ci/bes-oss-k8s.bazelrc bes.bazelrc' - } else { - sh 'cp ci/bes-k8s.bazelrc bes.bazelrc' - } - } - body() -} - -def fetchSourceAndTargetsK8s(Closure body) { - fetchSourceK8s { - container('gcloud') { - unstashFromGCS(TARGETS_STASH_NAME) - } - body() - } -} - -def runBazelCmd(String f, String targetConfig, int retries = 5) { - def retval = sh( - script: "bazel ${f} ${bbLinks()} --build_metadata=CONFIG=${targetConfig}", - returnStatus: true - ) - - if (retval == 38 && (targetConfig == 'tsan' || targetConfig == 'asan')) { - // If bes update failed for a sanitizer run, re-run to get the real retval. - if (retries == 0) { - println('Bazel bes update failed for sanitizer run after multiple retries.') - return retval - } - println('Bazel bes update failed for sanitizer run, retrying...') - return runBazelCmd(f, targetConfig, retries - 1) - } - // 4 means that tests not present. - // 38 means that bes update failed. - // Both are not fatal. - if (retval == 0 || retval == 4 || retval == 38) { - if (retval != 0) { - println("Bazel returned ${retval}, ignoring...") - } - return 0 - } - return retval -} -/** - * Runs bazel CI. - * - * The targetFilter can either be a bazel filter clause, or bazel path (//..., etc.), but not a list of paths. 
- */ -def bazelCICmd(String name, String targetConfig='clang', String targetCompilationMode='opt', - String targetsSuffix, String bazelRunExtraArgs='') { - def buildableFile = "bazel_buildables_${targetsSuffix}" - def testFile = "bazel_tests_${targetsSuffix}" - - def args = "-c ${targetCompilationMode} --config=${targetConfig} --build_metadata=COMMIT_SHA=\$(git rev-parse HEAD) ${bazelRunExtraArgs}" - - def buildRet = runBazelCmd("build ${args} --target_pattern_file ${buildableFile}", targetConfig) - if (buildRet != 0) { - unstable('Bazel build failed') - } - def testRet = runBazelCmd("test ${args} --target_pattern_file ${testFile}", targetConfig) - if (testRet == 0 || testRet == 3) { - // Create stash even when we get 3 as a retval which indicates some tests failed. - // This allows the test reporter to report on failing test names/counts. - createBazelStash("${name}-testlogs") - } - if (testRet != 0) { - unstable('Bazel test failed') - } -} - -def processBazelLogs(String logBase) { - step([ - $class: 'XUnitPublisher', - thresholds: [ - [ - $class: 'FailedThreshold', - unstableThreshold: '1' - ] - ], - tools: [ - [ - $class: 'GoogleTestType', - skipNoTestFiles: true, - pattern: "${logBase}/bazel-testlogs-archive/**/*.xml" - ] - ] - ]) -} - -def processAllExtractedBazelLogs() { - stashList.each { stashName -> - if (stashName.endsWith('testlogs')) { - processBazelLogs(stashName) - } - } -} - -def sendSlackNotification() { - if (currentBuild.result != 'SUCCESS' && currentBuild.result != null) { - slackSend color: '#FF0000', message: "FAILED: Build - ${env.BUILD_TAG} -- URL: ${env.BUILD_URL}." - } - else if (currentBuild.getPreviousBuild() && - currentBuild.getPreviousBuild().getResult().toString() != 'SUCCESS') { - slackSend color: '#00FF00', message: "PASSED(Recovered): Build - ${env.BUILD_TAG} -- URL: ${env.BUILD_URL}." 
- } -} - -def sendCloudReleaseSlackNotification(String profile) { - if (currentBuild.result == 'SUCCESS' || currentBuild.result == null) { - slackSend color: '#00FF00', message: "${profile} Cloud deployed - ${env.BUILD_TAG} -- URL: ${env.BUILD_URL}." - } else { - slackSend color: '#FF0000', message: "${profile} Cloud deployed FAILED - ${env.BUILD_TAG} -- URL: ${env.BUILD_URL}." - } -} - -def postBuildActions = { - if (!isOSSRun) { - sendSlackNotification() - } - if (isOSSMainRun) { - step([$class: "GitHubCommitStatusSetter",]); - } -} - -def initializeRepoState() { - sh 'git config --global --add safe.directory $(pwd)' - sh './ci/save_version_info.sh' - sh './ci/save_diff_info.sh' - - withCredentials([ - string( - credentialsId: 'buildbuddy-api-key', - variable: 'BUILDBUDDY_API_KEY' - ), - string( - credentialsId: 'github-license-ratelimit', - variable: 'GH_API_KEY' - ) - ]) { - sh './ci/write_bazelrc.sh' - } - - // Get docker image tag. - def properties = readProperties file: 'docker.properties' - devDockerImageWithTag = "${DEV_DOCKER_IMAGE}:${properties.DOCKER_IMAGE_TAG}@sha256:${properties.DEV_IMAGE_DIGEST}" - devDockerImageExtrasWithTag = "${DEV_DOCKER_IMAGE_EXTRAS}:${properties.DOCKER_IMAGE_TAG}@sha256:${properties.DEV_IMAGE_WITH_EXTRAS_DIGEST}" - - stashOnGCS(SRC_STASH_NAME, '.') -} - -// K8s related helpers -def gcloudContainer() { - containerTemplate(name: 'gcloud', image: GCLOUD_DOCKER_IMAGE, command: 'cat', ttyEnabled: true) -} - -def gitContainer() { - containerTemplate(name: 'git', image: GIT_DOCKER_IMAGE, command: 'cat', ttyEnabled: true) -} - -def pxdevContainer(boolean needExtras=false) { - def image = needExtras ? devDockerImageExtrasWithTag : devDockerImageWithTag - containerTemplate(name: 'pxdev', image: image, command: 'cat', ttyEnabled: true) -} - -def pxbuildContainer(boolean needExtras=false) { - def image = needExtras ? 
devDockerImageExtrasWithTag : devDockerImageWithTag - containerTemplate( - name: 'pxbuild', image: image, command: 'cat', ttyEnabled: true, - resourceRequestMemory: '58368Mi', resourceRequestCpu: '30000m', - ) -} - -pxbuildPodPatch = ''' -spec: - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: pxbuild - securityContext: - capabilities: - add: - - SYS_PTRACE -''' - -def retryPodTemplate(String suffix, List containers, Closure body) { - warnError('Script failed') { - retryOnK8sDownscale { - def label = "worker-${env.BUILD_TAG}-${suffix}" - podTemplate(label: label, cloud: 'devinfra-cluster-usw1-0', containers: containers) { - node(label) { - body() - } - } - } - } -} - -def pxbuildRetryPodTemplate(String suffix, boolean needExtras=false, Closure body) { - warnError('Script failed') { - retryOnK8sDownscale { - def label = "worker-${env.BUILD_TAG}-${suffix}" - podTemplate( - label: label, cloud: 'devinfra-cluster-usw1-0', containers: [ - pxbuildContainer(needExtras), gcloudContainer(), - ], - yaml: pxbuildPodPatch, - yamlMergeStrategy: merge(), - hostNetwork: true, - volumes: [ - hostPathVolume(mountPath: '/var/run/docker.sock', hostPath: '/var/run/docker.sock'), - hostPathVolume(mountPath: '/var/lib/docker', hostPath: '/var/lib/docker'), - hostPathVolume(mountPath: '/mnt/disks/jenkins/sharedDir', hostPath: '/mnt/disks/jenkins/sharedDir') - ]) { - node(label) { - container('pxbuild') { - sh 'git config --global --add safe.directory `pwd`' - } - body() - } - } - } - } -} - -def pxbuildWithSourceK8s(String suffix, boolean needExtras=false, Closure body) { - pxbuildRetryPodTemplate(suffix, needExtras) { - fetchSourceK8s { - timeout(time: 90, unit: 'MINUTES') { - body() - } - } - } -} - -def pxbuildWithSourceAndTargetsK8s(String suffix, boolean needExtras=false, Closure body) { - pxbuildRetryPodTemplate(suffix, needExtras) { - fetchSourceAndTargetsK8s { - timeout(time: 90, unit: 'MINUTES') { - body() - } - } - } -} - -/** - * Checkout the source code, record git 
info and stash sources. - */ -def checkoutAndInitialize() { - retryPodTemplate('init', [gcloudContainer()]) { - container('gcloud') { - deleteDir() - checkout scm - initializeRepoState() - if (isOSSCodeReviewRun) { - def logMessage = sh( - script: 'git log origin/main..', - returnStdout: true, - ).trim() - - def hasTag = { log, tag -> (log ==~ "(?s).*#ci:${tag }(\\s|\$).*") } - - buildTagBPFBuild = hasTag(logMessage, 'bpf-build') - buildTagBPFBuildAllKernels = hasTag(logMessage, 'bpf-build-all-kernels') - } - } - } -} - -def enableForTargets(String targetName, Closure body) { - if (!shFileEmpty("bazel_buildables_${targetName}") || !shFileEmpty("bazel_tests_${targetName}")) { - body() - } -} - -/***************************************************************************** - * BUILDERS: This sections defines all the build steps that will happen in parallel. - *****************************************************************************/ -def preBuild = [:] -def builders = [:] - -def buildAndTestOptWithUI = { - pxbuildWithSourceAndTargetsK8s('build-opt') { - container('pxbuild') { - bazelCICmd('build-opt', 'clang', 'opt', 'clang_opt', '--action_env=GOOGLE_APPLICATION_CREDENTIALS') - } - } -} - -def buildClangTidy = { - pxbuildWithSourceK8s('clang-tidy') { - container('pxbuild') { - def stashName = 'build-clang-tidy-logs' - if (isMainRun) { - // For main builds we run clang tidy on changes files in the past 10 revisions, - // this gives us a good balance of speed and coverage. - sh 'ci/run_clang_tidy.sh -f diff_head_cc' - } else { - // For code review builds only run on diff. 
- sh 'ci/run_clang_tidy.sh -f diff_origin_main_cc' - } - stashOnGCS(stashName, 'clang_tidy.log') - stashList.add(stashName) - } - } -} - -def buildDbg = { - pxbuildWithSourceAndTargetsK8s('build-dbg') { - container('pxbuild') { - bazelCICmd('build-dbg', 'clang', 'dbg', 'clang_dbg', '--action_env=GOOGLE_APPLICATION_CREDENTIALS') - } - } -} - -def buildGoRace = { - pxbuildWithSourceAndTargetsK8s('build-go-race') { - container('pxbuild') { - bazelCICmd('build-go-race', 'go_race', 'opt', 'go_race') - } - } -} - -def buildASAN = { - pxbuildWithSourceAndTargetsK8s('build-asan') { - container('pxbuild') { - bazelCICmd('build-asan', 'asan', 'dbg', 'sanitizer') - } - } -} - -def buildTSAN = { - pxbuildWithSourceAndTargetsK8s('build-tsan') { - container('pxbuild') { - bazelCICmd('build-tsan', 'tsan', 'dbg', 'sanitizer') - } - } -} - -def buildGCC = { - pxbuildWithSourceAndTargetsK8s('build-gcc-opt') { - container('pxbuild') { - bazelCICmd('build-gcc-opt', 'gcc', 'opt', 'gcc_opt') - } - } -} - -def bazelCICmdBPFonGCE(String name, String targetConfig='clang', String targetCompilationMode='opt', - String targetsSuffix, String bazelRunExtraArgs='', String kernel=BPF_DEFAULT_KERNEL) { - def buildableFile = "bazel_buildables_${targetsSuffix}" - def testFile = "bazel_tests_${targetsSuffix}" - def bazelArgs = "-c ${targetCompilationMode} --config=${targetConfig} --build_metadata=COMMIT_SHA=\$(git rev-parse HEAD) ${bazelRunExtraArgs}" - def stashName = "${name}-${kernel}-testlogs" - - fetchFromGCS(SRC_STASH_NAME) - fetchFromGCS(TARGETS_STASH_NAME) - - def retval = sh( - script: """ - export BUILDABLE_FILE="${buildableFile}" - export TEST_FILE="${testFile}" - export BAZEL_ARGS="${bazelArgs}" - export STASH_NAME="${stashName}" - export GCS_STASH_BUCKET="${GCS_STASH_BUCKET}" - export BUILD_TAG="${BUILD_TAG}" - export KERNEL_VERSION="${kernel}" - export GCP_PROJECT="${GCP_PROJECT}" - export BES_FILE="${BES_GCE_FILE}" - ./ci/bpf/00_create_instance.sh - """, - returnStatus: true - ) - - if 
(retval == 0 || retval == 3) { - stashList.add(stashName) - } - if (retval != 0) { - unstable('Bazel BPF build/test failed') - } -} - -def buildAndTestBPFOpt = { kernel -> - retryPodTemplate('build-bpf-opt', [gcloudContainer()]) { - fetchSourceAndTargetsK8s { - container('gcloud') { - bazelCICmdBPFonGCE('build-bpf', 'bpf', 'opt', 'bpf', '', kernel) - } - } - } -} - -def buildAndTestBPFASAN = { kernel -> - retryPodTemplate('build-bpf-asan', [gcloudContainer()]) { - fetchSourceAndTargetsK8s { - container('gcloud') { - bazelCICmdBPFonGCE('build-bpf-asan', 'bpf_asan', 'dbg', 'bpf_sanitizer', '', kernel) - } - } - } -} - -def buildAndTestBPFTSAN = { kernel -> - retryPodTemplate('build-bpf-tsan', [gcloudContainer()]) { - fetchSourceAndTargetsK8s { - container('gcloud') { - bazelCICmdBPFonGCE('build-bpf-tsan', 'bpf_tsan', 'dbg', 'bpf_sanitizer', '', kernel) - } - } - } -} - -def generateTestTargets = { - enableForTargets('clang_opt') { - builders['Build & Test (clang:opt + UI)'] = buildAndTestOptWithUI - } - - enableForTargets('clang_tidy') { - builders['Clang-Tidy'] = buildClangTidy - } - - enableForTargets('clang_dbg') { - builders['Build & Test (dbg)'] = buildDbg - } - - enableForTargets('sanitizer') { - builders['Build & Test (asan)'] = buildASAN - } - - enableForTargets('sanitizer') { - builders['Build & Test (tsan)'] = buildTSAN - } - - enableForTargets('gcc_opt') { - builders['Build & Test (gcc:opt)'] = buildGCC - } - - enableForTargets('go_race') { - builders['Build & Test (go race detector)'] = buildGoRace - } - - BPF_KERNELS_TO_TEST.each { kernel -> - enableForTargets('bpf') { - builders["Build & Test (bpf tests - opt) - ${kernel}"] = { buildAndTestBPFOpt(kernel) } - } - - if (runBPFWithASAN) { - enableForTargets('bpf_sanitizer') { - builders["Build & Test (bpf tests - asan) - ${kernel}"] = { buildAndTestBPFASAN(kernel) } - } - } - - if (runBPFWithTSAN) { - enableForTargets('bpf_sanitizer') { - builders["Build & Test (bpf tests - tsan) - ${kernel}"] = { 
buildAndTestBPFTSAN(kernel) } - } - } - } -} - -preBuild['Process Dependencies'] = { - retryPodTemplate('process-deps', [gcloudContainer(), pxdevContainer()]) { - fetchSourceK8s { - container('pxdev') { - sh 'git config --global --add safe.directory `pwd`' - def forceAll = '' - def enableBPF = '' - - if (isMainRun || isNightlyTestRegressionRun || isOSSMainRun || isNightlyBPFTestRegressionRun) { - forceAll = '-a' - enableBPF = '-b' - } - - if (buildTagBPFBuild || buildTagBPFBuildAllKernels) { - enableBPF = '-b' - } - - sh """ - ./ci/bazel_build_deps.sh ${forceAll} ${enableBPF} - wc -l bazel_* - """ - - if (buildTagBPFBuildAllKernels) { - BPF_KERNELS_TO_TEST = BPF_KERNELS - } - - stashOnGCS(TARGETS_STASH_NAME, 'bazel_*') - generateTestTargets() - } - } - } -} - -if (isMainRun || isOSSMainRun) { - def codecovToken = 'pixie-codecov-token' - def slug = 'pixie-labs/pixielabs' - if (isOSSMainRun) { - codecovToken = 'pixie-oss-codecov-token' - slug = 'pixie-io/pixie' - } - // Only run coverage on main runs. 
- builders['Build & Test (gcc:coverage)'] = { - pxbuildWithSourceAndTargetsK8s('coverage') { - container('pxbuild') { - warnError('Coverage command failed') { - withCredentials([ - string( - credentialsId: codecovToken, - variable: 'CODECOV_TOKEN' - ) - ]) { - sh "ci/collect_coverage.sh -u -b main -c `cat GIT_COMMIT` -r " + slug - } - } - createBazelStash('build-gcc-coverage-testlogs') - } - } - } -} - -def buildScriptForOSSCloudRelease = { - try { - stage('Checkout code') { - checkoutAndInitialize() - } - stage('Build & Push Artifacts') { - pxbuildWithSourceK8s('build-and-push-oss-cloud', true) { - container('pxbuild') { - sh './ci/cloud_build_release.sh -p' - } - } - } - } - catch (err) { - currentBuild.result = 'FAILURE' - echo "Exception thrown:\n ${err}" - echo 'Stacktrace:' - err.printStackTrace() - } - postBuildActions() -} - -builders['Lint & Docs'] = { - pxbuildWithSourceAndTargetsK8s('lint') { - container('pxbuild') { - // Prototool relies on having a main branch in this checkout, so create one tracking origin/main - sh 'git branch main --track origin/main' - sh 'arc lint --trace --never-apply-patches' - } - } -} - -/***************************************************************************** - * END BUILDERS - *****************************************************************************/ - -def archiveBuildArtifacts = { - retryPodTemplate('archive', [gcloudContainer()]) { - container('gcloud') { - // Unstash the build artifacts. - stashList.each { stashName -> - dir(stashName) { - unstashFromGCS(stashName) - } - } - - // Remove the tests attempts directory because it - // causes the test publisher to mark as failed. - sh 'find . -name test_attempts -type d -exec rm -rf {} +' - - // Archive clang-tidy logs. - archiveArtifacts artifacts: 'build-clang-tidy-logs/**', fingerprint: true, allowEmptyArchive: true - - // Actually process the bazel logs to look for test failures. 
- processAllExtractedBazelLogs() - } - } -} - -/******************************************** - * The build script starts here. - ********************************************/ -def buildScriptForCommits = { - retryPodTemplate('root', [gcloudContainer()]) { - if (isMainRun || isOSSMainRun) { - def namePrefix = 'pixie-main' - if (isOSSMainRun) { - namePrefix = 'pixie-oss' - } - // If there is a later build queued up, we want to stop the current build so - // we can execute the later build instead. - def q = Jenkins.get().getQueue() - abortBuild = false - q.getItems().each { - if (it.task.getDisplayName() == 'build-and-test-all') { - // Use fullDisplayName to distinguish between pixie-oss and pixie-main builds. - if (it.task.getFullDisplayName().startsWith(namePrefix)) { - abortBuild = true - } - } - } - - if (abortBuild) { - echo 'Stopping current build because a later build is already enqueued' - return - } - } - - try { - stage('Checkout code') { - checkoutAndInitialize() - } - stage('Pre-Build') { - parallel(preBuild) - } - stage('Build Steps') { - parallel(builders) - } - stage('Archive') { - archiveBuildArtifacts() - } - } - catch (err) { - currentBuild.result = 'FAILURE' - echo "Exception thrown:\n ${err}" - echo 'Stacktrace:' - err.printStackTrace() - } - - postBuildActions() - } -} - -/***************************************************************************** - * REGRESSION_BUILDERS: This sections defines all the test regressions steps - * that will happen in parallel. 
- *****************************************************************************/ -def bpfRegressionBuilders = [:] - -BPF_KERNELS.each { kernel -> - bpfRegressionBuilders["Test (opt) ${kernel}"] = { - retryPodTemplate('build-bpf-opt', [gcloudContainer()]) { - fetchSourceAndTargetsK8s { - container('gcloud') { - bazelCICmdBPFonGCE('build-bpf', 'bpf', 'opt', 'bpf', '', kernel) - } - } - } - } -} - -/***************************************************************************** - * REGRESSION_BUILDERS: This sections defines all the test regressions steps - * that will happen in parallel. - *****************************************************************************/ -def regressionBuilders = [:] - -TEST_ITERATIONS = 5 - -regressionBuilders['Test (opt)'] = { - pxbuildWithSourceAndTargetsK8s('test-opt') { - container('pxbuild') { - bazelCICmd('build-opt', 'clang', 'opt', 'clang_opt', "--runs_per_test ${TEST_ITERATIONS}") - } - } -} - -regressionBuilders['Test (ASAN)'] = { - pxbuildWithSourceAndTargetsK8s('test-asan') { - container('pxbuild') { - bazelCICmd('build-asan', 'asan', 'dbg', 'sanitizer', "--runs_per_test ${TEST_ITERATIONS}") - } - } -} - -regressionBuilders['Test (TSAN)'] = { - pxbuildWithSourceAndTargetsK8s('test-tsan') { - container('pxbuild') { - bazelCICmd('build-tsan', 'tsan', 'dbg', 'sanitizer', "--runs_per_test ${TEST_ITERATIONS}") - } - } -} - -/***************************************************************************** - * END REGRESSION_BUILDERS - *****************************************************************************/ - -/***************************************************************************** - * STIRLING PERF BUILDERS: Create & deploy, wait, then measure CPU use. - *****************************************************************************/ - -def clusterNames = (params.CLUSTER_NAMES != null) ? params.CLUSTER_NAMES.split(',') : [''] -int numPerfEvals = (params.NUM_EVAL_RUNS != null) ? 
Integer.parseInt(params.NUM_EVAL_RUNS) : 5 -int warmupMinutes = (params.WARMUP_MINUTES != null) ? Integer.parseInt(params.WARMUP_MINUTES) : 30 -int evalMinutes = (params.EVAL_MINUTES != null) ? Integer.parseInt(params.EVAL_MINUTES) : 60 -int profilerMinutes = (params.PROFILER_MINUTES != null) ? Integer.parseInt(params.PROFILER_MINUTES) : 5 -int cleanupClusters = (params.CLEANUP_CLUSTERS != null) ? Integer.parseInt(params.CLEANUP_CLUSTERS) : 1 -String groupName = (params.GROUP_NAME != null) ? params.GROUP_NAME : 'none' -String machineType = (params.MACHINE_TYPE != null) ? params.MACHINE_TYPE : 'n2-standard-4' -String experimentTag = (params.EXPERIMENT_TAG != null) ? params.EXPERIMENT_TAG : 'none' -String gitHashForPerfEval = (params.GIT_HASH_FOR_PERF_EVAL != null) ? params.GIT_HASH_FOR_PERF_EVAL : 'HEAD' -String imageTagForPerfEval = 'none' - -def stirlingPerfBuilders = [:] - -String getClusterNameDateString() { - date = LocalDateTime.now() - return date.format(DateTimeFormatter.ofPattern('yyyy-MM-dd-HH-mm-ss')) -} - -useCluster = { String clusterName -> - sh 'hostname' - sh 'gcloud --version' - sh "gcloud container clusters get-credentials ${clusterName} --project pl-pixies --zone us-west1-a" -} - -deleteCluster = { String clusterName -> - // We use 'delete || true' so that failure does not cause the entire pipeline to fail or go unstable. - // In particular, deleteCluster is invoked when createCluster fails; this has two scenarios: - // 1. The cluster was created, but the gcloud command failed anyway. - // 2. The cluster was not created. - // ... For (1) above, we expect cluster deletion to succeed. - // ... For (2) above, we invoke deleteCluster (because it is hard to know we are in this scenario), - // ... and we expect the command to fail, but we don't want the entire build/perf-eval to stop. - // In general, if clusters leak through, they are eventually cleaned up by the perf eval cluster - // cleanup cron job. 
- sh "gcloud container --project pl-pixies clusters delete ${clusterName} --zone us-west1-a --quiet || true" -} - -createCluster = { String clusterName -> - retryIdx = 0 - numRetries = 3 - - // We will uniquify the cluster name based on the retry count because there is some chance - // that gcloud will refuse to create a cluster (on retry) based on a name being in use. - // Here we create a local variable 'retryUniqueClusterName' that is distinct from 'clusterName' - // because 'clusterName' is curried into 'oneEval'. If we clobber 'clusterName' here, - // the currying becomes wrong and different evals will wrongly all pick up the same cluster name. - retryUniqueClusterName = null - - createClusterScript = 'scripts/create_gke_cluster.sh' - sh 'hostname' - sh 'gcloud components update' - sh 'gcloud --version' - retry(numRetries) { - if (retryIdx > 0) { - // Prevent leaking clusters from previous attempts. - deleteCluster(retryUniqueClusterName) - } - // Uniquify the cluster name based on the retryIdx because retry attempts - // may fail based on the pre-existing cluster name. - retryUniqueClusterName = clusterName + '-' + String.format('%d', retryIdx) - sh "${createClusterScript} -S -f -n 1 -c ${retryUniqueClusterName} -m ${machineType}" - ++retryIdx - } -} - -pxDeployForStirlingPerfEval = { - withCredentials([ - string( - // There are two credentials for perf-evals: - // 1. px-staging-user-api-key: staging cloud as pixie org. member. - // 2. px-stirling-perf-eval-user-api-key: staging cloud, as "perf-eval" (a different) org. - // Currently using (2) above because that isolates the perf evals from updates made to staging - // cloud by the cloud team, e.g. plugin scripts running (or not running). - credentialsId: 'px-stirling-perf-eval-user-api-key', - variable: 'THE_PIXIE_CLI_API_KEY' - ) - ]) { - withEnv(['PL_CLOUD_ADDR=staging.withpixie.dev:443']) { - // Useful if one wants to ssh in for debugging "Jenkins only" issues. 
- sh 'hostname' - - // Download the latest px binary. - // Deploy demo apps. - // Deploy pixie. - sh 'curl -fsSL https://storage.googleapis.com/pixie-dev-public/cli/latest/cli_linux_amd64 -o /usr/local/bin/px' - sh 'chmod +x /usr/local/bin/px' - sh 'px auth login --use_api_key --api_key ${THE_PIXIE_CLI_API_KEY}' - sh 'px demo deploy px-kafka -y -q' - sh 'px demo deploy px-sock-shop -y -q' - sh 'px demo deploy px-online-boutique -y -q' - sh 'px deploy -y -q' - - // Ensure skaffold is configured with the dev. image registry. - sh 'skaffold config set default-repo gcr.io/pl-dev-infra' - // Regenerate the json list of artifacts targeting the images built for this eval. - sh "skaffold build -p opt -t ${imageTagForPerfEval} --dry-run -q -f skaffold/skaffold_vizier.yaml > artifacts.json" - // Useful for local debug, or to verify the image tags. - sh 'cat artifacts.json' - // Skaffold deploy using perf-eval images generated in the build & push step. - sh 'cat artifacts.json | skaffold deploy -f skaffold/skaffold_vizier.yaml --build-artifacts -' - } - } -} - -def pxCollectPerfInfo(String clusterName, int evalIdx, int evalMinutes, int profilerMinutes) { - withCredentials([ - string( - credentialsId: 'px-stirling-perf-eval-user-api-key', - variable: 'THE_PIXIE_CLI_API_KEY' - ) - ]) { - withEnv(['PL_CLOUD_ADDR=staging.withpixie.dev:443']) { - // These should have been created when un-stashing the repo info. - assert fileExists('logs') - assert fileExists('logs/pod_resource_usage') - - // Show the cluster name (useful if results are strange and we suspect the wrong - // cluster was used for recording perf info). 
- sh 'kubectl config current-context' - - sh 'px auth login --use_api_key --api_key ${THE_PIXIE_CLI_API_KEY}' - sh "px run -f logs/pod_resource_usage -o json -- --start_time=-${evalMinutes}m 1> logs/perf.jsons 2> logs/perf.jsons.stderr" - - sh "px run px/perf_flamegraph -o json -- --start_time=-${profilerMinutes}m --pct_basis_entity=node --pod=pem 1> logs/stack-traces.jsons 2> logs/stack-traces.jsons.stderr" - sh "gcloud container clusters list --project pl-pixies --filter='name:${clusterName}' --format=json | tee logs/cluster-info.json" - - // Save the original results. - indexedEvalResultName = String.format('perf-eval-results-%02d', evalIdx) - stashOnGCS(indexedEvalResultName, 'logs') - } - } -} - -insertRecordsToPerfDB = { int evalIdx -> - perf_reqs = 'src/stirling/private/scripts/perf/requirements.txt' - perf_eval = 'src/stirling/private/scripts/perf/perf-eval.py' - withCredentials([ - usernamePassword( - credentialsId: 'stirling-perf-postgres', - usernameVariable: 'STIRLING_PERF_DB_USERNAME', - passwordVariable: 'STIRLING_PERF_DB_PASSWORD', - ) - ]) { - // perf-eval.py will read the git repo to find the commit hash & date/time. - // Here, we just fail fast in case the git repo is missing. - assert fileExists('.git') - - // Ensure git is configured correctly, and show repo state. - sh 'git config --global --add safe.directory $(pwd)' - sh 'git rev-parse HEAD' - - // Ensure requirements setup for perf-eval.py. - sh "pip3 install -r ${perf_reqs}" - - // Insert the perf records into the perf db. - // Retries exist because in rare cases, the perf db complains about too many API requests. 
- numRetries = 3 - retry(numRetries) { - sh "python3 ${perf_eval} insert-perf-records --jenkins --group-name ${groupName} --tag ${experimentTag} --idx ${evalIdx}" - } - } -} - -def getCurrentClusterName() { - def currentClusterName = sh( - script: 'kubectl config current-context', - returnStdout: true, - returnStatus: false - ).trim() - - // currentClusterName will look something like this: - // gke_pl-pixies_us-west1-a_stirling-perf-2022-08-24--0648-09-00-0 - // However, fro a GKE perspective the name is stirling-perf-2022-08-24--0648-09-00-0. - def tokens = currentClusterName.split('_') - currentClusterName = tokens.last() - return currentClusterName -} - -oneEval = { int evalIdx, String clusterName, boolean newClusterNeeded -> - int margin = 2 - int clusterCreationMinutes = 15 - int pixieDeployMinutes = 10 - int timeoutMinutes = margin * (clusterCreationMinutes + pixieDeployMinutes + warmupMinutes + evalMinutes) - - return { - pxbuildRetryPodTemplate('stirling-perf-eval') { - fetchSourceK8s { - timeout(time: timeoutMinutes, unit: 'MINUTES') { - container('pxbuild') { - // Unstash the "as built" repo info (see buildAndPushPemImagesForPerfEval). - // In more detail, here, we start with a fresh fully up-to-date source tree. The "as built" repo - // state will often be different (e.g. a particular diff or local experiment). - // That state is only known inside of buildAndPushPemImagesForPerfEval. Because we need that - // information, buildAndPushPemImagesForPerfEval is responsible for stashing the info on GCS, - // and here, we recover the saved state (in file 'logs/perf_eval_repo_info.bin'). - // The stash on GCS is needed because file system state is volatile in these build stages. - unstashFromGCS('perf-eval-repo-info') - assert fileExists('logs/perf_eval_repo_info.bin') - assert fileExists('logs/pod_resource_usage') - - if (newClusterNeeded) { - // Default behavior: create a new cluster for this perf eval. 
- stage('Create cluster.') { - createCluster(clusterName) - } - } else { - // A pre-existing cluster name was supplied to the build. - stage('Use cluster.') { - echo "clusterName: ${clusterName}." - useCluster(clusterName) - } - } - stage('Deploy pixie.') { - pxDeployForStirlingPerfEval() - } - stage('Warmup.') { - sh "sleep ${60 * warmupMinutes}" - } - stage('Evaluate.') { - sh "sleep ${60 * evalMinutes}" - } - stage('Collect.') { - pxCollectPerfInfo(getCurrentClusterName(), evalIdx, evalMinutes, profilerMinutes) - } - stage('Insert records to perf db.') { - insertRecordsToPerfDB(evalIdx) - } - if (newClusterNeeded) { - // Earlier, we had created a new cluster for this perf eval. - // Here, we clean up. - stage('Delete cluster.') { - if (cleanupClusters) { - deleteCluster(getCurrentClusterName()) - } else { - sh 'echo skipping cluster cleanup.' - } - } - } - } - } - } - } - } -} - -def savePodResourceUsagePxlScript() { - pod_resource_usage_path = 'src/pxl_scripts/private/b7ca1b62-6c9f-4a3f-a45d-a5bdffbcae6a/pod_resource_usage' - assert fileExists(pod_resource_usage_path) - sh 'mkdir -p logs/pod_resource_usage' - sh "cp ${pod_resource_usage_path}/* logs/pod_resource_usage" -} - -def saveRepoInfo() { - perf_reqs = 'src/stirling/private/scripts/perf/requirements.txt' - perf_eval = 'src/stirling/private/scripts/perf/perf-eval.py' - withCredentials([ - usernamePassword( - credentialsId: 'stirling-perf-postgres', - usernameVariable: 'STIRLING_PERF_DB_USERNAME', - passwordVariable: 'STIRLING_PERF_DB_PASSWORD', - ) - ]) { - sh 'mkdir -p logs' - sh "pip3 install -r ${perf_reqs}" - sh "python3 ${perf_eval} save-perf-record-repo-info-to-disk --jenkins" - stashOnGCS('perf-eval-repo-info', 'logs') - } -} - -def checkIfRequiredImagesExist() { - numImages = Integer.parseInt( - sh( - script: "cat artifacts.json | jq '.builds[].imageName' | wc -l", - returnStdout: true, - returnStatus: false - ).trim() - ) - - // Use the artifacts.json file and jq to build a list of all required 
images. - def requiredImages = [] - - for (int i = 0; i < numImages; i++) { - imageNameAndTag = sh(script: "cat artifacts.json | jq '.builds[${i}].tag'", returnStdout: true, returnStatus: false).trim() - requiredImages.add(imageNameAndTag) - } - - // allRequiredImagesExist will be set to false if we cannot find any one of the required images. - boolean allRequiredImagesExist = true - - for (imageNameAndTag in requiredImages) { - echo "Checking if image: ${imageNameAndTag} exists." - describeStatusCode = sh(script: "gcloud container images describe ${imageNameAndTag}", returnStdout: false, returnStatus: true) - - if (describeStatusCode != 0) { - echo "Image: ${imageNameAndTag} does not exist." - allRequiredImagesExist = false - break - } - else { - echo "Image: ${imageNameAndTag} exists." - } - } - - if (allRequiredImagesExist) { - sep = '\n... ' - echo "All images found:${sep}${requiredImages.join(sep)}" - } - return allRequiredImagesExist -} - -def checkoutTargetRepo(String gitHashForPerfEval) { - // Log out initial repo state. - sh 'echo "Starting repo state:" && git rev-parse HEAD' - - // GIT_HASH_FOR_PERF_EVAL branch. - // Here, we evaluate some commit that is merged into main. - // Alternately (to a SHA), the user can specify a string like "HEAD~3" or "some-branch". - // Build arg. GIT_HASH_FOR_PERF_EVAL is converted into sha, - // and used to construct the resulting image tag. 
- sh "echo 'Target repo state:' && git rev-parse ${gitHashForPerfEval}" - gitHashForPerfEval = sh(script: "git rev-parse ${gitHashForPerfEval}", returnStdout: true, returnStatus: false).trim() - sh "git checkout ${gitHashForPerfEval}" - imageTagForPerfEval = 'perf-eval-' + gitHashForPerfEval - - echo "Image tag for perf eval: ${imageTagForPerfEval}" - sh 'echo "Repo state:" && git rev-parse HEAD' - return imageTagForPerfEval -} - -buildAndPushPemImagesForPerfEval = { - pxbuildWithSourceK8s('pem-build-push') { - container('pxbuild') { - // We will need the repo, fail fast here if it is not available. - assert fileExists('.git') - - // Ensure repo is configured for use. - sh 'git config --global --add safe.directory $(pwd)' - - // Copy the pod resource utilization script into the logs directory, - // so that it is stashed along with repo info. - savePodResourceUsagePxlScript() - - imageTagForPerfEval = checkoutTargetRepo(gitHashForPerfEval) - saveRepoInfo() - - // Ensure skaffold is configured for dev. image registry. - sh 'skaffold config set default-repo gcr.io/pl-dev-infra' - - // Remote caching setup does not work correctly at this time: - // disable remote caching by removing this bazelrc file. - sh 'rm bes.bazelrc' - - // Save the image names & tags into artiacts.json, and log out the same info. - // Useful if one wants to cross check vs. the artifacts that we deploy later. - sh "skaffold build -p opt -t ${imageTagForPerfEval} -f skaffold/skaffold_vizier.yaml -q --dry-run | tee artifacts.json" - - allRequiredImagesExist = checkIfRequiredImagesExist() - - if (!allRequiredImagesExist) { - echo 'Building all images.' - sh "skaffold build -p opt -t ${imageTagForPerfEval} -f skaffold/skaffold_vizier.yaml" - } - } - } -} - -if (clusterNames[0].size()) { - // Useful for: - // ... debugging - // ... faster runs or iterations - // ... other special cases or special setups. - // This branch allows a user to specify which cluster(s) to run the perf eval on. 
- // (It will *not* create new clusters.) - // To enable, specify the cluster name(s) as a build param. For more than one cluster, - // use a comma separated list: - // my-dev-cluster-00,my-dev-cluster-01 - boolean newClusterNeeded = false - clusterNames.eachWithIndex { clusterName, i -> - title = "Eval ${i}." - perfEvaluator = oneEval.curry(i).curry(clusterName).curry(newClusterNeeded) - stirlingPerfBuilders[title] = perfEvaluator() - } -} else { - // Default path: no cluster names supplied to the build. - // The perf evals will create clusters. - boolean newClusterNeeded = true - for (int i = 0; i < numPerfEvals; i++) { - clusterName = 'stirling-perf-' + getClusterNameDateString() + '-' + String.format('%02d', i) - title = "Eval ${i}." - perfEvaluator = oneEval.curry(i).curry(clusterName).curry(newClusterNeeded) - stirlingPerfBuilders[title] = perfEvaluator() - } -} - -/***************************************************************************** - * END STIRLING PERF BUILDERS - *****************************************************************************/ - -def buildScriptForNightlyTestRegression = { testjobs -> - try { - stage('Checkout code') { - checkoutAndInitialize() - } - stage('Pre-Build') { - parallel(preBuild) - } - stage('Testing') { - parallel(testjobs) - } - stage('Archive') { - retryPodTemplate('archive', [gcloudContainer()]) { - container('gcloud') { - // Unstash the build artifacts. - stashList.each { stashName -> - dir(stashName) { - unstashFromGCS(stashName) - } - } - - // Remove the tests attempts directory because it - // causes the test publisher to mark as failed. - sh 'find . -name test_attempts -type d -exec rm -rf {} +' - - // Actually process the bazel logs to look for test failures. 
- processAllExtractedBazelLogs() - } - } - } - } - catch (err) { - currentBuild.result = 'FAILURE' - echo "Exception thrown:\n ${err}" - echo 'Stacktrace:' - err.printStackTrace() - } - - postBuildActions() -} - -def pushAndDeployCloud(String profile, String namespace, String clusterCreds, String clusterURL) { - pxbuildWithSourceK8s('build-and-push-cloud', true) { - container('pxbuild') { - withKubeConfig([ - credentialsId: clusterCreds, - serverUrl: clusterURL, namespace: namespace - ]) { - withCredentials([ - file( - credentialsId: 'pl-dev-infra-jenkins-sa-json', - variable: 'GOOGLE_APPLICATION_CREDENTIALS' - ) - ]) { - if (profile == 'prod') { - sh './ci/cloud_build_release.sh -r' - } else { - sh './ci/cloud_build_release.sh' - } - } - } - } - } -} - -def buildScriptForCloudStagingRelease = { - try { - stage('Checkout code') { - checkoutAndInitialize() - } - stage('Build & Push Artifacts') { - pushAndDeployCloud('staging', 'plc-staging', K8S_STAGING_CREDS, K8S_STAGING_CLUSTER) - } - } - catch (err) { - currentBuild.result = 'FAILURE' - echo "Exception thrown:\n ${err}" - echo 'Stacktrace:' - err.printStackTrace() - } - sendCloudReleaseSlackNotification('Staging') - postBuildActions() -} - -def buildScriptForCloudProdRelease = { - try { - stage('Checkout code') { - checkoutAndInitialize() - } - stage('Build & Push Artifacts') { - pushAndDeployCloud('prod', 'plc', K8S_PROD_CREDS, K8S_PROD_CLUSTER) - } - } - catch (err) { - currentBuild.result = 'FAILURE' - echo "Exception thrown:\n ${err}" - echo 'Stacktrace:' - err.printStackTrace() - } - sendCloudReleaseSlackNotification('Prod') - postBuildActions() -} - -def buildScriptForStirlingPerfEval = { - stage('Checkout code.') { - checkoutAndInitialize() - } - stage('Build & push.') { - buildAndPushPemImagesForPerfEval() - } - if (currentBuild.result == 'SUCCESS' || currentBuild.result == null) { - stage('Stirling perf eval.') { - parallel(stirlingPerfBuilders) - } - } - else { - currentBuild.result = 'FAILURE' - } -} - 
-if (isNightlyTestRegressionRun) { - buildScriptForNightlyTestRegression(regressionBuilders) -} else if (isNightlyBPFTestRegressionRun) { - buildScriptForNightlyTestRegression(bpfRegressionBuilders) -} else if (isCloudStagingBuildRun) { - buildScriptForCloudStagingRelease() -} else if (isCloudProdBuildRun) { - buildScriptForCloudProdRelease() -} else if (isOSSCloudBuildRun) { - buildScriptForOSSCloudRelease() -} else if (isStirlingPerfEval) { - buildScriptForStirlingPerfEval() -} else { - buildScriptForCommits() -} diff --git a/k8s/devinfra/jenkins-oss/README.md b/k8s/devinfra/jenkins-oss/README.md deleted file mode 100644 index 56e914788f5..00000000000 --- a/k8s/devinfra/jenkins-oss/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Setup Helm: - helm repo add jenkins https://charts.jenkins.io - helm repo update - -To deploy Jenkins run the following: - helm upgrade cd-jenkins -f values.yaml jenkins/jenkins --wait - - -To get the password: - echo Password: $(kubectl get secret --namespace jenkins-oss cd-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode) diff --git a/k8s/devinfra/jenkins-oss/frontend_config.yaml b/k8s/devinfra/jenkins-oss/frontend_config.yaml deleted file mode 100644 index 36c9c952c51..00000000000 --- a/k8s/devinfra/jenkins-oss/frontend_config.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: networking.gke.io/v1beta1 -kind: FrontendConfig -metadata: - name: frontend-config -spec: - redirectToHttps: - enabled: true - sslPolicy: gke-ingress-ssl-policy diff --git a/k8s/devinfra/jenkins-oss/managed_cert.yaml b/k8s/devinfra/jenkins-oss/managed_cert.yaml deleted file mode 100644 index d00214439a3..00000000000 --- a/k8s/devinfra/jenkins-oss/managed_cert.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: networking.gke.io/v1 -kind: ManagedCertificate -metadata: - name: jenkins-oss-ingress-managed-cert -spec: - domains: - - jenkins.px.dev diff --git a/k8s/devinfra/jenkins-oss/values.yaml b/k8s/devinfra/jenkins-oss/values.yaml deleted file 
mode 100644 index c6d75aab71c..00000000000 --- a/k8s/devinfra/jenkins-oss/values.yaml +++ /dev/null @@ -1,1147 +0,0 @@ ---- -# Default values for jenkins. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. -# name: value - -## Overrides for generated resource names -# See templates/_helpers.tpl -# nameOverride: -# fullnameOverride: -namespaceOverride: jenkins-oss - -# For FQDN resolving of the controller service. Change this value to match your existing configuration. -# ref: https://github.com/kubernetes/dns/blob/master/docs/specification.md -clusterZone: "cluster.local" - -renderHelmLabels: true - -controller: - # Used for label app.kubernetes.io/component - componentName: "jenkins-controller" - image: "jenkins/jenkins" - tag: "2.375.1-lts-jdk11" - # tagLabel: jdk11 - imagePullPolicy: "Always" - imagePullSecretName: - # Optionally configure lifetime for controller-container - lifecycle: - # postStart: - # exec: - # command: - # - "uname" - # - "-a" - disableRememberMe: false - numExecutors: 0 - # configures the executor mode of the Jenkins node. Possible values are: NORMAL or EXCLUSIVE - executorMode: "NORMAL" - customJenkinsLabels: [] - # The default configuration uses this secret to configure an admin user - # If you don't need that user or use a different security realm then you can disable it - adminSecret: true - - hostNetworking: false - # When enabling LDAP or another non-Jenkins identity source, the built-in admin account will no longer exist. - # If you disable the non-Jenkins identity store and instead use the Jenkins internal one, - # you should revert controller.adminUser to your preferred admin user: - adminUser: "admin" - # adminPassword: the-password - admin: - existingSecret: "" - userKey: jenkins-admin-user - passwordKey: jenkins-admin-password - # This values should not be changed unless you use your custom image of jenkins or any devired from. 
If you want to - # use Cloudbees Jenkins Distribution docker, you should set jenkinsHome: "/var/cloudbees-jenkins-distribution" - jenkinsHome: "/var/jenkins_home" - # This values should not be changed unless you use your custom image of jenkins or any devired from. If you want to - # use Cloudbees Jenkins Distribution docker, you should set - # jenkinsRef: "/usr/share/cloudbees-jenkins-distribution/ref" - jenkinsRef: "/usr/share/jenkins/ref" - # Path to the jenkins war file which is used by jenkins-plugin-cli. - jenkinsWar: "/usr/share/jenkins/jenkins.war" - # Overrides the default arguments passed to the war - # overrideArgs: - # - --httpPort=8080 - resources: - requests: - cpu: "2000m" - memory: "4096Mi" - limits: - cpu: "8000m" - memory: "8192Mi" - # Overrides the init container default values - # initContainerResources: - # requests: - # cpu: "50m" - # memory: "256Mi" - # limits: - # cpu: "2000m" - # memory: "4096Mi" - # Environment variables that get added to the init container (useful for e.g. http_proxy) - # initContainerEnv: - # - name: http_proxy - # value: "http://192.168.64.1:3128" - # containerEnv: - # - name: http_proxy - # value: "http://192.168.64.1:3128" - # Set min/max heap here if needed with: - javaOpts: "-Xms2048m -Xmx4096m" - jenkinsOpts: "-Dorg.csanchez.jenkins.plugins.kubernetes.pipeline.ContainerExecDecorator.websocketConnectionTimeout=60" - # If you are using the ingress definitions provided by this chart via the `controller.ingress` block the configured - # hostname will be the ingress hostname starting with `https://` or `http://` depending on the `tls` configuration. - # The Protocol can be overwritten by specifying `controller.jenkinsUrlProtocol`. - jenkinsUrlProtocol: "https" - # If you are not using the provided ingress you can specify `controller.jenkinsUrl` to change the url definition. 
- jenkinsUrl: "https://jenkins.px.dev" - # If you set this prefix and use ingress controller then you might want to set the ingress path below - # jenkinsUriPrefix: "/jenkins" - # Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set) - usePodSecurityContext: true - # Note that `runAsUser`, `fsGroup`, and `securityContextCapabilities` are - # being deprecated and replaced by `podSecurityContextOverride`. - # Set runAsUser to 1000 to let Jenkins run as non-root user 'jenkins' which exists in 'jenkins/jenkins' docker image. - # When setting runAsUser to a different value than 0 also set fsGroup to the same value: - runAsUser: 1000 - fsGroup: 1000 - # If you have PodSecurityPolicies that require dropping of capabilities as suggested by CIS K8s benchmark, - # put them here - securityContextCapabilities: {} - # drop: - # - NET_RAW - # Completely overwrites the contents of the `securityContext`, ignoring the - # values provided for the deprecated fields: `runAsUser`, `fsGroup`, and - # `securityContextCapabilities`. In the case of mounting an ext4 filesystem, - # it might be desirable to use `supplementalGroups` instead of `fsGroup` in - # the `securityContext` block: https://github.com/kubernetes/kubernetes/issues/67014#issuecomment-589915496 - # podSecurityContextOverride: - # runAsUser: 1000 - # runAsNonRoot: true - # supplementalGroups: [1000] - # # capabilities: {} - # Container securityContext - containerSecurityContext: - runAsUser: 1000 - runAsGroup: 1000 - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - servicePort: 8080 - targetPort: 8080 - # For minikube, set this to NodePort, elsewhere use LoadBalancer - # Use ClusterIP if your setup includes ingress controller - serviceType: NodePort - # Use Local to preserve the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, - # but risks potentially imbalanced traffic spreading. 
- serviceExternalTrafficPolicy: - # Jenkins controller service annotations - serviceAnnotations: - cloud.google.com/backend-config: '{"ports": {"8080": "jenkins-backendconfig"}}' - cloud.google.com/app-protocols: '{"http":"HTTP"}' - cloud.google.com/load-balancer-type: external - cloud.google.com/neg: '{"ingress": true}' - # Jenkins controller custom labels - statefulSetLabels: {} - # foo: bar - # bar: foo - # Jenkins controller service labels - serviceLabels: {} - # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https - # Put labels on Jenkins controller pod - podLabels: {} - # Used to create Ingress record (should used with ServiceType: ClusterIP) - # nodePort: - # -Dcom.sun.management.jmxremote.port=4000 - # -Dcom.sun.management.jmxremote.authenticate=false - # -Dcom.sun.management.jmxremote.ssl=false - # jmxPort: 4000 - # Optionally configure other ports to expose in the controller container - extraPorts: [] - # - name: BuildInfoProxy - # port: 9000 - # targetPort: 9010 (Optional: Use to explicitly set targetPort if different from port) - - # List of plugins to be install during Jenkins controller start - installPlugins: - - ansicolor:1.0.2 - - antisamy-markup-formatter:155.v795fb_8702324 - - configuration-as-code:1569.vb_72405b_80249 - - embeddable-build-status:312.vf2de01b_051d0 - - git:5.0.0 - - github:1.36.0 - - http_request:1.16 - - jackson2-api:2.14.1-313.v504cdd45c18b - - junit:1166.va_436e268e972 - - kubernetes:3802.vb_b_600831fcb_3 - - material-theme:0.5.2-rc100.6121925fe229 - - scm-api:631.v9143df5b_e4a_a - - workflow-aggregator:590.v6a_d052e5a_a_b_5 - - github-oauth:0.39 - - blueocean:1.27.1 - - blueocean-dashboard:1.27.1 - - blueocean-web:1.27.1 - - matrix-auth:3.0 - - pipeline-utility-steps:2.14.0 - - display-url-api:2.3.7 - - job-dsl:1.81 - - xunit:3.1.2 - - ghprb:1.42.2 - - script-security:1229.v4880b_b_e905a_6 - - basic-branch-build-strategies:71.vc1421f89888e - - kubernetes-cli:1.12.0 - # Set to false to download the minimum 
required version of all dependencies. - installLatestPlugins: true - - # Set to true to download latest dependencies of any plugin that is requested to have the latest version. - installLatestSpecifiedPlugins: false - - # List of plugins to install in addition to those listed in controller.installPlugins - additionalPlugins: [] - - # Enable to initialize the Jenkins controller only once on initial installation. - # Without this, whenever the controller gets restarted (Evicted, etc.) it will fetch plugin updates which has the - # potential to cause breakage. Note that for this to work, `persistence.enabled` needs to be set to `true` - initializeOnce: true - - # Enable to always override the installed plugins with the values of 'controller.installPlugins' - # on upgrade or redeployment. - overwritePlugins: true - - # Configures if plugins bundled with `controller.image` should be overwritten with the values of - # 'controller.installPlugins' on upgrade or redeployment. - overwritePluginsFromImage: true - - # Configures the restrictions for naming projects. Set this key to null or empty to skip it in the default config. - projectNamingStrategy: standard - - # Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter), useful with ghprb plugin. - # The plugin is not installed by default, please update controller.installPlugins. - enableRawHtmlMarkupFormatter: true - # Used to approve a list of groovy functions in pipelines used the script-security plugin. 
- # Can be viewed under /scriptApproval - scriptApproval: - - "field hudson.model.Queue$Item task" - - "method hudson.model.AbstractCIBase getQueue" - - "method hudson.model.Queue getItems" - - "staticMethod jenkins.model.Jenkins get" - # - "method groovy.json.JsonSlurperClassic parseText java.lang.String" - # - "new groovy.json.JsonSlurperClassic" - # List of groovy init scripts to be executed during Jenkins controller start - initScripts: [] - # - | - # print 'adding global pipeline libraries, register properties, bootstrap jobs...' - - # 'name' is a name of an existing secret in same namespace as jenkins, - # 'keyName' is the name of one of the keys inside current secret. - # the 'name' and 'keyName' are concatenated with a '-' in between, so for example: - # an existing secret "secret-credentials" and a key inside it named "github-password" should be used in Jcasc - # as ${secret-credentials-github-password} - # 'name' and 'keyName' must be lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', - # and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc') - # existingSecret existing secret "secret-credentials" and a key inside it named "github-username" - # should be used in Jcasc as ${github-username}. - # When using existingSecret no need to specify the keyName under additionalExistingSecrets. - existingSecret: "" - - additionalExistingSecrets: - - name: github-secrets - keyName: client-id - - name: github-secrets - keyName: client-secret - - name: github-secrets - keyName: buildbot-api-token - - name: github-secrets - keyName: rate-limit-token - - name: github-secrets - keyName: ssh-key - - name: bb-secrets - keyName: api-key - - name: codecov-secrets - keyName: token - - additionalSecrets: [] - # - name: nameOfSecret - # value: secretText - - # Generate SecretClaim resources in order to create Kubernetes secrets from HashiCorp Vault using - # kube-vault-controller. 
- # 'name' is name of the secret that will be created in Kubernetes. The Jenkins fullname is prepended to this value. - # 'path' is the fully qualified path to the secret in Vault - # 'type' is an optional Kubernetes secret type. Defaults to 'Opaque' - # 'renew' is an optional secret renewal time in seconds - secretClaims: [] - # - name: secretName # required - # path: testPath # required - # type: kubernetes.io/tls # optional - # renew: 60 # optional - - # Name of default cloud configuration. - cloudName: "devinfra-cluster-usw1-0" - - # Below is the implementation of Jenkins Configuration as Code. Add a key under configScripts for each - # configuration area, where each corresponds to a plugin or section of the UI. Each key (prior to | character) is - # just a label, and can be any value. Keys are only used to give the section a meaningful name. The only - # restriction is they may only contain RFC 1123 \ DNS label characters: lowercase letters, numbers, and hyphens. - # The keys become the name of a configuration yaml file on the controller in - # /var/jenkins_home/casc_configs (by default) and will be processed by the Configuration as Code Plugin. - # The lines after each | become the content of the configuration yaml file. The first line after - # this is a JCasC root element, eg jenkins, credentials, - # etc. Best reference is https:///configuration-as-code/reference. - # The example below creates a welcome message: - JCasC: - defaultConfig: true - configUrls: [] - # - https://acme.org/jenkins.yaml - # Remote URL:s for configuration files. 
- configScripts: - # yamllint disable rule:indentation - gh: |- - unclassified: - gitHubPluginConfig: - configs: - - credentialsId: "github-buildbot-api-token" - manageHooks: false - name: "gh" - ghprbTrigger: - adminlist: "zasgar aimichelle vihangm" - autoCloseFailedPullRequests: false - cron: "H/5 * * * *" - extensions: - - ghprbSimpleStatus: - addTestResults: true - commitStatusContext: "Jenkins Build" - completedStatus: - - message: "Build Passed" - result: SUCCESS - showMatrixStatus: false - startedStatus: "Jenkins Build Running" - statusUrl: "$RUN_DISPLAY_URL" - githubAuth: - - credentialsId: "github-buildbot-api-token" - serverAPIUrl: "https://api.github.com" - manageWebhooks: true - okToTestPhrase: "$^ Never match this since it is insecure" - requestForTestingPhrase: "Can one of the admins verify this patch?" - retestPhrase: ".*test\\W+this\\W+please.*" - skipBuildPhrase: ".*\\[skip\\W+ci\\].*" - useComments: true - useDetailedComments: false - whitelistPhrase: ".*add\\W+to\\W+allowlist.*" - creds: |- - credentials: - system: - domainCredentials: - - credentials: - - string: - id: "github-license-ratelimit" - description: "A github OAuth token for pixie-labs-buildbot with no additional oauth scopes." - scope: GLOBAL - secret: "${github-secrets-rate-limit-token}" - - string: - id: "github-buildbot-api-token" - description: "A github OAuth token for pixie-labs-buildbot with access to write to PRs." 
- scope: GLOBAL - secret: "${github-secrets-buildbot-api-token}" - - string: - id: "buildbuddy-api-key" - description: "API key for Buildbuddy" - scope: GLOBAL - secret: "${bb-secrets-api-key}" - - string: - id: "pixie-oss-codecov-token" - description: "Token for CodeCov" - scope: GLOBAL - secret: "${codecov-secrets-token}" - - basicSSHUserPrivateKey: - scope: GLOBAL - id: buildbot-ssh - username: git - description: "Read-only build bot access" - privateKeySource: - directEntry: - privateKey: "${github-secrets-ssh-key}" - pipeline: |- - jobs: - - script: > - folder("pixie-oss") - - script: > - folder("pixie-release") - - script: > - pipelineJob("pixie-oss/build-and-test-all") { - logRotator(30, 250) - properties { - disableConcurrentBuilds() - pipelineTriggers { - triggers { - githubPush() - } - } - } - definition { - cpsScm { - scriptPath('Jenkinsfile') - scm { - git { - remote { - url('https://github.com/pixie-io/pixie.git') - credentials('buildbot-ssh') - } - branch('main') - } - } - } - } - } - - script: > - pipelineJob("pixie-oss/build-and-test-pr") { - logRotator(30, 250) - properties { - githubProjectUrl('https://github.com/pixie-io/pixie/') - } - definition { - cpsScm { - scriptPath('Jenkinsfile') - scm { - git { - remote { - url('https://github.com/pixie-io/pixie.git') - credentials('buildbot-ssh') - refspec('+refs/pull/*:refs/remotes/origin/pr/*') - } - branch('^${sha1}') - } - } - triggers { - githubPullRequest { - admins(['zasgar', 'aimichelle', 'vihangm', 'jamesmbartlett']) - allowMembersOfWhitelistedOrgsAsAdmin(false) - orgWhitelist('pixie-io') - useGitHubHooks(true) - extensions { - cancelBuildsOnUpdate { - overrideGlobal(true) - } - } - } - } - } - } - } - - script: > - multibranchPipelineJob("pixie-release/cloud") { - branchSources { - branchSource { - source { - git { - id('pixie-cloud') - remote('https://github.com/pixie-io/pixie.git') - credentialsId('buildbot-ssh') - traits { - gitTagDiscovery() - headWildcardFilter { - 
includes("release/cloud/prod/*") - excludes('') - } - } - } - } - buildStrategies { - buildTags { - atMostDays('1') - atLeastDays('0') - } - } - } - } - triggers { - periodicFolderTrigger { - interval('1m') - } - } - } - # yamllint enable rule:indentation - # Allows adding to the top-level security JCasC section. For legacy, default the chart includes - # apiToken configurations - security: - apiToken: - creationOfLegacyTokenEnabled: false - tokenGenerationOnCreationEnabled: false - usageStatisticsEnabled: true - # Ignored if securityRealm is defined in controller.JCasC.configScripts - # yamllint disable rule:indentation - securityRealm: |- - github: - clientID: "${github-secrets-client-id}" - clientSecret: "${github-secrets-client-secret}" - githubApiUri: "https://api.github.com" - githubWebUri: "https://github.com" - oauthScopes: "read:org,user:email" - # Ignored if authorizationStrategy is defined in controller.JCasC.configScripts - authorizationStrategy: |- - globalMatrix: - permissions: - - "GROUP:Overall/Read:anonymous" - - "GROUP:View/Read:anonymous" - - "GROUP:Job/Read:anonymous" - - "GROUP:Job/Discover:anonymous" - - "GROUP:Job/ViewStatus:anonymous" - - "GROUP:Overall/Read:authenticated" - - "GROUP:View/Read:authenticated" - - "GROUP:Job/Read:authenticated" - - "GROUP:Job/Discover:authenticated" - - "GROUP:Job/ViewStatus:authenticated" - - "GROUP:Overall/Administer:pixie-io*admin" - # Optionally specify additional init-containers - customInitContainers: [] - # - name: custom-init - # image: "alpine:3.7@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10" - # imagePullPolicy: IfNotPresent - # command: ["/bin/sh", "-c", "ls"] - # volumeMounts: - # - name: jenkins-home - # mountPath: "/var/jenkins_home" - - sidecars: - configAutoReload: - # If enabled: true, Jenkins Configuration as Code will be reloaded on-the-fly without a reboot. 
- # If false or not-specified, - # jcasc changes will cause a reboot and will only be applied at the subsequent start-up. Auto-reload uses the - # http:///reload-configuration-as-code endpoint to reapply config when changes to the - # configScripts are detected. - enabled: true - image: kiwigrid/k8s-sidecar:1.15.0@sha256:abc3060bfe232788886f279530f8afe02614ef590ae59d9d58f637df770bcffc - imagePullPolicy: IfNotPresent - resources: - {} - # limits: - # cpu: 100m - # memory: 100Mi - # requests: - # cpu: 50m - # memory: 50Mi - # How many connection-related errors to retry on - reqRetryConnect: 10 - # env: - # - name: REQ_TIMEOUT - # value: "30" - # SSH port value can be set to any unused TCP port. The default, 1044, is a non-standard SSH port - # that has been chosen at random. - # Is only used to reload jcasc config from the sidecar container running in the Jenkins controller pod. - # This TCP port will not be open in the pod (unless you specifically configure this), so Jenkins will not be - # accessible via SSH from outside of the pod. Note if you use non-root pod privileges (runAsUser & fsGroup), - # this must be > 1024: - sshTcpPort: 1044 - # folder in the pod that should hold the collected dashboards: - folder: "/var/jenkins_home/casc_configs" - # If specified, the sidecar will search for JCasC config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces: - # searchNamespace: - containerSecurityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - - # Allows you to inject additional/other sidecars - other: [] - ## The example below runs the client for https://smee.io as sidecar container next to Jenkins, - ## that allows to trigger build behind a secure firewall. 
- ## https://jenkins.io/blog/2019/01/07/webhook-firewalls/#triggering-builds-with-webhooks-behind-a-secure-firewall - ## - ## Note: To use it you should go to https://smee.io/new and update the url to the generete one. - # - name: smee - # yamllint disable-line rule:line-length - # image: docker.io/twalter/smee-client:1.0.2@sha256:cd7abc0156a96a92c75edb6b7f06eb2f8abaaeb8d160040d5636492328e2fc66 - # args: ["--port", "{{ .Values.controller.servicePort }}", \ - # "--path", "/github-webhook/", "--url", "https://smee.io/new"] - # resources: - # limits: - # cpu: 50m - # memory: 128Mi - # requests: - # cpu: 10m - # memory: 32Mi - # Name of the Kubernetes scheduler to use - schedulerName: "" - # Node labels and tolerations for pod assignment - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - nodeSelector: {} - - terminationGracePeriodSeconds: - - terminationMessagePath: - terminationMessagePolicy: - - tolerations: [] - - affinity: {} - # Leverage a priorityClass to ensure your pods survive resource shortages - # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - priorityClassName: - - podAnnotations: {} - # Add StatefulSet annotations - statefulSetAnnotations: {} - - # StatefulSet updateStrategy - # ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - updateStrategy: {} - - ingress: - enabled: true - # Override for the default paths that map requests to the backend - paths: [] - # - backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - # - backend: - # serviceName: >- - # {{ template "jenkins.fullname" . }} - # # Don't use string here, use only integer value! 
- # servicePort: 8080 - # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1' - # For Kubernetes v1.19+, use 'networking.k8s.io/v1' - apiVersion: "networking.k8s.io/v1" - labels: {} - annotations: - kubernetes.io/ingress.global-static-ip-name: jenkins-oss-external-ipaddr - kubernetes.io/ingress.class: gce - networking.gke.io/managed-certificates: jenkins-oss-ingress-managed-cert - networking.gke.io/v1beta1.FrontendConfig: "frontend-config" - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - # #specifying-the-class-of-an-ingress - # ingressClassName doesn't work with GKE, we need to continue using the annotation. - # ingressClassName: gce - # Set this path to jenkinsUriPrefix above or use annotations to rewrite path - path: "/*" - # configures the hostname e.g. jenkins.example.com - hostName: jenkins.px.dev - tls: - # - secretName: jenkins.cluster.local - # hosts: - # - jenkins.cluster.local - - # often you want to have your controller all locked down and private - # but you still want to get webhooks from your SCM - # A secondary ingress will let you expose different urls - # with a differnt configuration - secondaryingress: - enabled: false - # paths you want forwarded to the backend - # ex /github-webhook - paths: [] - # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1' - # For Kubernetes v1.19+, use 'networking.k8s.io/v1' - apiVersion: "extensions/v1beta1" - labels: {} - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - # #specifying-the-class-of-an-ingress - # ingressClassName: nginx - # configures the hostname e.g. 
jenkins-external.example.com - hostName: - tls: - # - secretName: jenkins-external.example.com - # hosts: - # - jenkins-external.example.com - - # If you're running on GKE and need to configure a backendconfig - # to finish ingress setup, use the following values. - # Docs: https://cloud.google.com/kubernetes-engine/docs/concepts/backendconfig - backendconfig: - enabled: true - apiVersion: "cloud.google.com/v1" - name: jenkins-backendconfig - labels: {} - annotations: {} - spec: - cdn: - enabled: true - cachePolicy: - includeHost: true - includeProtocol: true - includeQueryString: true - - # Openshift route - route: - enabled: false - labels: {} - annotations: {} - # path: "/jenkins" - - # controller.hostAliases allows for adding entries to Pod /etc/hosts: - # https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - hostAliases: [] - # - ip: 192.168.50.50 - # hostnames: - # - something.local - # - ip: 10.0.50.50 - # hostnames: - # - other.local - - # Expose Prometheus metrics - prometheus: - # If enabled, add the prometheus plugin to the list of plugins to install - # https://plugins.jenkins.io/prometheus - enabled: false - # Additional labels to add to the ServiceMonitor object - serviceMonitorAdditionalLabels: {} - # Set a custom namespace where to deploy ServiceMonitor resource - # serviceMonitorNamespace: monitoring - scrapeInterval: 60s - # This is the default endpoint used by the prometheus plugin - scrapeEndpoint: /prometheus - # Additional labels to add to the PrometheusRule object - alertingRulesAdditionalLabels: {} - # An array of prometheus alerting rules - # See here: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - # The `groups` root object is added by default, simply add the rule entries - alertingrules: [] - # Set a custom namespace where to deploy PrometheusRule resource - prometheusRuleNamespace: "" - - googlePodMonitor: - # If enabled, It creates Google Managed 
Prometheus scraping config - enabled: false - # Set a custom namespace where to deploy PodMonitoring resource - # serviceMonitorNamespace: "" - scrapeInterval: 60s - # This is the default endpoint used by the prometheus plugin - scrapeEndpoint: /prometheus - - # Can be used to disable rendering controller test resources when using helm template - testEnabled: true - -agent: - enabled: true - defaultsProviderTemplate: "" - # URL for connecting to the Jenkins contoller - jenkinsUrl: "https://jenkins.px.dev" - # connect to the specified host and port, instead of connecting directly to the Jenkins controller - jenkinsTunnel: - kubernetesConnectTimeout: 5 - kubernetesReadTimeout: 15 - maxRequestsPerHostStr: "32" - namespace: - image: "jenkins/inbound-agent" - tag: "4.11.2-4" - workingDir: "/home/jenkins/agent" - nodeUsageMode: "NORMAL" - customJenkinsLabels: [] - # name of the secret to be used for image pulling - imagePullSecretName: - componentName: "jenkins-agent" - websocket: false - privileged: false - runAsUser: - runAsGroup: - hostNetworking: false - resources: - requests: - cpu: "512m" - memory: "512Mi" - limits: - cpu: "512m" - memory: "512Mi" - # You may want to change this to true while testing a new image - alwaysPullImage: false - # Controls how agent pods are retained after the Jenkins build completes - # Possible values: Always, Never, OnFailure - podRetention: "Never" - # Disable if you do not want the Yaml the agent pod template to show up - # in the job Console Output. This can be helpful for either security reasons - # or simply to clean up the output to make it easier to read. 
- showRawYaml: true - # You can define the volumes that you want to mount for this container - # Allowed types are: ConfigMap, EmptyDir, HostPath, Nfs, PVC, Secret - # Configure the attributes as they appear in the corresponding Java class for that type - # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/ - # jenkins/plugins/kubernetes/volumes - volumes: [] - # - type: ConfigMap - # configMapName: myconfigmap - # mountPath: /var/myapp/myconfigmap - # - type: EmptyDir - # mountPath: /var/myapp/myemptydir - # memory: false - # - type: HostPath - # hostPath: /var/lib/containers - # mountPath: /var/myapp/myhostpath - # - type: Nfs - # mountPath: /var/myapp/mynfs - # readOnly: false - # serverAddress: "192.0.2.0" - # serverPath: /var/lib/containers - # - type: PVC - # claimName: mypvc - # mountPath: /var/myapp/mypvc - # readOnly: false - # - type: Secret - # defaultMode: "600" - # mountPath: /var/myapp/mysecret - # secretName: mysecret - # Pod-wide environment, these vars are visible to any container in the agent pod - - # You can define the workspaceVolume that you want to mount for this container - # Allowed types are: DynamicPVC, EmptyDir, HostPath, Nfs, PVC - # Configure the attributes as they appear in the corresponding Java class for that type - # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/\ - # jenkins/plugins/kubernetes/volumes/workspace - workspaceVolume: {} - ## DynamicPVC example - # type: DynamicPVC - # configMapName: myconfigmap - ## EmptyDir example - # type: EmptyDir - # memory: false - ## HostPath example - # type: HostPath - # hostPath: /var/lib/containers - ## NFS example - # type: Nfs - # readOnly: false - # serverAddress: "192.0.2.0" - # serverPath: /var/lib/containers - ## PVC example - # type: PVC - # claimName: mypvc - # readOnly: false - # - # Pod-wide environment, these vars are visible to any container in the agent pod - envVars: [] - # - name: PATH - # value: 
/usr/local/bin - nodeSelector: {} - # Key Value selectors. Ex: - # jenkins-agent: v1 - - # Executed command when side container gets started - command: - args: "${computer.jnlpmac} ${computer.name}" - # Side container name - sideContainerName: "jnlp" - # Doesn't allocate pseudo TTY by default - TTYEnabled: false - # Max number of spawned agent - containerCap: 30 - # Pod name - podName: "default" - # Allows the Pod to remain active for reuse until the configured number of - # minutes has passed since the last step was executed on it. - idleMinutes: 0 - # Raw yaml template for the Pod. For example this allows usage of toleration for agent pods. - # https://github.com/jenkinsci/kubernetes-plugin#using-yaml-to-define-pod-templates - # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - yamlTemplate: "" - # yamlTemplate: |- - # apiVersion: v1 - # kind: Pod - # spec: - # tolerations: - # - key: "key" - # operator: "Equal" - # value: "value" - # Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates: merge or override - yamlMergeStrategy: "override" - # Timeout in seconds for an agent to be online - connectTimeout: 100 - # Annotations to apply to the pod. - annotations: {} - - # Add additional containers to the agents. - # Containers specified here are added to all agents. Set key empty to remove container from additional agents. - additionalContainers: [] - # - sideContainerName: dind - # image: docker - # tag: dind - # command: dockerd-entrypoint.sh - # args: "" - # privileged: true - # resources: - # requests: - # cpu: 500m - # memory: 1Gi - # limits: - # cpu: 1 - # memory: 2Gi - - # Disable the default Jenkins Agent configuration. - # Useful when configuring agents only with the podTemplates value, since the default podTemplate populated by values - # mentioned above will be excluded in the rendered template. 
- disableDefaultAgent: true - - # Below is the implementation of custom pod templates for the default configured kubernetes cloud. - # Add a key under podTemplates for each pod template. Each key (prior to | character) is just a label, - # and can be any value. - # Keys are only used to give the pod template a meaningful name. The only restriction is they may only - # contain RFC 1123 \ DNS label - # characters: lowercase letters, numbers, and hyphens. Each pod template can contain multiple containers. - # For this pod templates configuration to be loaded the following values must be set: - # controller.JCasC.defaultConfig: true - # Best reference is https:///configuration-as-code/reference#Cloud-kubernetes. - # The example below creates a python pod template. - podTemplates: {} - # python: | - # - name: python - # label: jenkins-python - # serviceAccount: jenkins - # containers: - # - name: python - # image: python:3@sha256:f7382f4f9dbc51183c72d621b9c196c1565f713a1fe40c119d215c961fa22815 - # command: "/bin/sh -c" - # args: "cat" - # ttyEnabled: true - # privileged: true - # resourceRequestCpu: "400m" - # resourceRequestMemory: "512Mi" - # resourceLimitCpu: "1" - # resourceLimitMemory: "1024Mi" - -# Here you can add additional agents -# They inherit all values from `agent` so you only need to specify values which differ -additionalAgents: {} -# maven: -# podName: maven -# customJenkinsLabels: maven -# # An example of overriding the jnlp container -# # sideContainerName: jnlp -# image: jenkins/jnlp-agent-maven -# tag: latest -# python: -# podName: python -# customJenkinsLabels: python -# sideContainerName: python -# image: python -# tag: "3" -# command: "/bin/sh -c" -# args: "cat" -# TTYEnabled: true - -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: - ## jenkins data Persistent Volume Storage Class - ## 
If defined, storageClassName: <storageClass>
Must be in cron time format - # Ref: https://crontab.guru/ - schedule: "0 2 * * *" - labels: {} - serviceAccount: - create: true - name: - annotations: {} - # Example for authorization to AWS S3 using kube2iam or IRSA - # Can also be done using environment variables - # iam.amazonaws.com/role: "jenkins" - # "eks.amazonaws.com/role-arn": "arn:aws:iam::123456789012:role/jenkins-backup" - # Set this to terminate the job that is running/failing continuously and set the job status to "Failed" - activeDeadlineSeconds: "" - image: - repository: "maorfr/kube-tasks" - tag: "0.2.0" - imagePullSecretName: - # Additional arguments for kube-tasks - # Ref: https://github.com/maorfr/kube-tasks#simple-backup - extraArgs: [] - # Add existingSecret for AWS credentials - existingSecret: {} - ## Example for using an existing secret - # jenkinsaws: - ## Use this key for AWS access key ID - # awsaccesskey: jenkins_aws_access_key - ## Use this key for AWS secret access key - # awssecretkey: jenkins_aws_secret_key - # Add additional environment variables - # jenkinsgcp: - ## Use this key for GCP credentials - # gcpcredentials: credentials.json - env: [] - # Example environment variable required for AWS credentials chain - # - name: "AWS_REGION" - # value: "us-east-1" - resources: - requests: - memory: 1Gi - cpu: 1 - limits: - memory: 1Gi - cpu: 1 - # Destination to store the backup artifacts - # Supported cloud storage services: AWS S3, Minio S3, Azure Blob Storage, Google Cloud Storage - # Additional support can added. 
Visit this repository for details - # Ref: https://github.com/maorfr/skbn - destination: "s3://jenkins-data/backup" - # By enabling only the jenkins_home/jobs folder gets backed up, not the whole jenkins instance - onlyJobs: false - # Enable backup pod security context (must be `true` if runAsUser or fsGroup are set) - usePodSecurityContext: true - # When setting runAsUser to a different value than 0 also set fsGroup to the same value: - runAsUser: 1000 - fsGroup: 1000 - securityContextCapabilities: {} - # drop: - # - NET_RAW -checkDeprecation: true - -4eawsSecurityGroupPolicies: - enabled: false - policies: - - name: "" - securityGroupIds: [] - podSelector: {}