- Basic Structure
- Pipeline Types
- Core Components
- Triggers
- Variables
- Jobs & Stages
- Tasks & Steps
- Templates
- Conditions & Expressions
- Deployment Strategies
- Complete Methods Reference
- Authentication Methods Overview
trigger:
- main
pool:
vmImage: 'ubuntu-latest'
steps:
- task: TaskName@Version
inputs:
parameter: value

stages:
- stage: Build
jobs:
- job: BuildJob
steps:
- script: echo Building...
- stage: Deploy
dependsOn: Build
jobs:
- deployment: DeployJob
environment: production
strategy:
runOnce:
deploy:
steps:
- script: echo Deploying...

| Type | File | Use Case |
|---|---|---|
| Classic | azure-pipelines.yml | Standard CI/CD |
| Multi-Stage | azure-pipelines.yml | Complex deployments |
| Resources Pipeline | Referenced YAML | Reusable workflows |
| Classic UI | Web-based | Visual pipeline designer |
Branch Triggers
trigger:
branches:
include:
- main
- develop
- release/*
exclude:
- hotfix/*
paths:
include:
- src/*
exclude:
- docs/*

PR Triggers
pr:
branches:
include:
- main
paths:
exclude:
- README.md

Scheduled Triggers
schedules:
- cron: "0 0 * * *"
displayName: Daily midnight build
branches:
include:
- main
always: true

Pipeline Triggers
resources:
pipelines:
- pipeline: upstream
source: ProjectName.PipelineName
trigger:
branches:
include:
- main

pool:
vmImage: 'ubuntu-latest'
# Options: ubuntu-latest, windows-latest, macOS-latest
# Self-hosted agent pool
pool:
name: 'MyAgentPool'
demands:
- agent.os -equals Linux
- npm

Matrix Strategy
strategy:
matrix:
Python37:
python.version: '3.7'
Python38:
python.version: '3.8'
Python39:
python.version: '3.9'
maxParallel: 3

| Trigger Type | Syntax | Description |
|---|---|---|
| CI Trigger | trigger: | Continuous integration on commit |
| PR Trigger | pr: | Pull request validation |
| Scheduled | schedules: | Time-based triggers (cron) |
| Pipeline | resources.pipelines | Triggered by another pipeline |
| Container | resources.containers | Container image updates |
| Webhook | resources.webhooks | External webhook triggers |
| Manual | trigger: none | Manual execution only |
Disable Automatic Triggers
trigger: none
pr: none

1. Pipeline-Level Variables
variables:
buildConfiguration: 'Release'
vmImage: 'ubuntu-latest'

2. Variable Groups
variables:
- group: 'MyVariableGroup'
- name: customVar
value: 'myValue'

3. Runtime Variables
steps:
- bash: echo "##vso[task.setvariable variable=myVar]myValue"
- bash: echo $(myVar)

4. Output Variables
steps:
- bash: echo "##vso[task.setvariable variable=myOutputVar;isOutput=true]outputValue"
name: setOutputVar
- bash: echo $(setOutputVar.myOutputVar)

5. Multi-Job Output Variables
jobs:
- job: A
steps:
- bash: echo "##vso[task.setvariable variable=myVar;isOutput=true]value"
name: setVar
- job: B
dependsOn: A
variables:
varFromA: $[ dependencies.A.outputs['setVar.myVar'] ]
steps:
- bash: echo $(varFromA)

| Variable | Description |
|---|---|
| $(Build.SourceBranch) | Source branch (refs/heads/main) |
| $(Build.BuildId) | Unique build identifier |
| $(Build.BuildNumber) | Build number (can be customized) |
| $(Build.SourceVersion) | Commit SHA |
| $(Agent.BuildDirectory) | Working directory |
| $(System.DefaultWorkingDirectory) | Repository root |
| $(Pipeline.Workspace) | Workspace for artifacts |
| $(Build.ArtifactStagingDirectory) | Staging directory |
1. Standard Job
jobs:
- job: MyJob
displayName: 'My Build Job'
pool:
vmImage: 'ubuntu-latest'
steps:
- script: echo Hello

2. Deployment Job
jobs:
- deployment: DeployWeb
displayName: 'Deploy to Web App'
environment: production
strategy:
runOnce:
deploy:
steps:
- download: current
artifact: webapp
- task: AzureWebApp@1

3. Container Job
jobs:
- job: Container
container:
image: node:16
options: --cpus 2
steps:
- script: node --version

stages:
- stage: Build
displayName: 'Build Stage'
condition: succeeded()
jobs:
- job: BuildJob
steps:
- script: echo Building
- stage: Test
dependsOn: Build
condition: succeeded()
jobs:
- job: TestJob
steps:
- script: echo Testing
- stage: Deploy
dependsOn:
- Build
- Test
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/main'))
jobs:
- deployment: DeployJob
environment: production

| Step Type | Purpose | Example |
|---|---|---|
| script | Inline shell script | script: echo Hello |
| bash | Bash script | bash: echo Hello |
| pwsh | PowerShell Core | pwsh: Write-Host Hello |
| powershell | Windows PowerShell | powershell: Write-Host Hello |
| task | Predefined task | task: CmdLine@2 |
| checkout | Repository checkout | checkout: self |
| download | Download artifacts | download: current |
| publish | Publish artifacts | publish: $(Build.ArtifactStagingDirectory) |
1. Checkout
steps:
- checkout: self
clean: true
fetchDepth: 0
persistCredentials: true

2. Script Execution
steps:
- script: |
echo "Multi-line script"
npm install
npm run build
displayName: 'Build Application'
workingDirectory: $(System.DefaultWorkingDirectory)

3. Copy Files
steps:
- task: CopyFiles@2
inputs:
SourceFolder: '$(Build.SourcesDirectory)'
Contents: '**/*.js'
TargetFolder: '$(Build.ArtifactStagingDirectory)'

4. Publish Artifacts
steps:
- task: PublishBuildArtifacts@1
inputs:
PathtoPublish: '$(Build.ArtifactStagingDirectory)'
ArtifactName: 'drop'
publishLocation: 'Container'

5. Download Artifacts
steps:
- download: current
artifact: drop
- task: DownloadPipelineArtifact@2
inputs:
buildType: 'specific'
project: 'MyProject'
pipeline: 'PipelineId'
artifactName: 'drop'

1. Step Template (steps-template.yml)
parameters:
- name: buildConfiguration
type: string
default: 'Release'
steps:
- script: echo Building ${{ parameters.buildConfiguration }}
- script: dotnet build --configuration ${{ parameters.buildConfiguration }}

Usage:
steps:
- template: steps-template.yml
parameters:
buildConfiguration: 'Debug'

2. Job Template (job-template.yml)
parameters:
- name: jobName
type: string
- name: vmImage
type: string
default: 'ubuntu-latest'
jobs:
- job: ${{ parameters.jobName }}
pool:
vmImage: ${{ parameters.vmImage }}
steps:
- script: echo Running job ${{ parameters.jobName }}

3. Stage Template (stage-template.yml)
parameters:
- name: stageName
type: string
- name: environment
type: string
stages:
- stage: ${{ parameters.stageName }}
jobs:
- deployment: Deploy
environment: ${{ parameters.environment }}
strategy:
runOnce:
deploy:
steps:
- script: echo Deploying to ${{ parameters.environment }}

4. Variable Template (variables.yml)
variables:
- name: buildConfiguration
value: 'Release'
- name: nodeVersion
value: '16.x'

Usage:
variables:
- template: variables.yml
steps:
- script: echo $(buildConfiguration)

# Conditional insertion
${{ if eq(parameters.runTests, true) }}:
- script: npm test
# Iteration
${{ each env in parameters.environments }}:
- stage: Deploy_${{ env }}
jobs:
- deployment: Deploy
environment: ${{ env }}
# Object iteration
parameters:
- name: configs
type: object
default:
debug: '-g'
release: '-O2'
${{ each config in parameters.configs }}:
- script: gcc ${{ config.value }} main.c
displayName: Build ${{ config.key }}

| Syntax | When to Use |
|---|---|
| condition: succeeded() | Default, runs if previous succeeded |
| condition: failed() | Runs only if previous failed |
| condition: always() | Always runs regardless of status |
| condition: succeededOrFailed() | Runs unless cancelled |
| condition: cancelled() | Runs only if cancelled |
# AND condition
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/main'))
# OR condition
condition: or(eq(variables.isMain, true), eq(variables.isDevelop, true))
# NOT condition
condition: not(eq(variables['Build.Reason'], 'PullRequest'))
# Combined
condition: |
and(
succeeded(),
or(
eq(variables['Build.SourceBranch'], 'refs/heads/main'),
startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')
),
ne(variables['Build.Reason'], 'PullRequest')
)

| Function | Example | Description |
|---|---|---|
| eq() | eq(variables.env, 'prod') | Equals |
| ne() | ne(variables.env, 'dev') | Not equals |
| and() | and(succeeded(), condition) | Logical AND |
| or() | or(failed(), condition) | Logical OR |
| not() | not(cancelled()) | Logical NOT |
| contains() | contains(variables.branch, 'feature') | String contains |
| startsWith() | startsWith(variables.branch, 'refs/heads/release') | String starts with |
| endsWith() | endsWith(variables.file, '.yml') | String ends with |
| in() | in(variables.env, 'dev', 'test') | Value in list |
| notIn() | notIn(variables.env, 'prod') | Value not in list |
strategy:
runOnce:
preDeploy:
steps:
- script: echo Pre-deployment
deploy:
steps:
- script: echo Deploying
routeTraffic:
steps:
- script: echo Routing traffic
postRouteTraffic:
steps:
- script: echo Post-routing
on:
failure:
steps:
- script: echo Cleanup on failure
success:
steps:
- script: echo Success actions

strategy:
rolling:
maxParallel: 2
preDeploy:
steps:
- script: echo Pre-deploy
deploy:
steps:
- script: echo Deploy to batch
postRouteTraffic:
steps:
- script: echo Health check

strategy:
canary:
increments: [10, 20, 50, 100]
preDeploy:
steps:
- script: echo Preparing canary
deploy:
steps:
- script: echo Deploy canary
routeTraffic:
steps:
- script: echo Route $(strategy.increment)% traffic
postRouteTraffic:
steps:
- script: echo Monitor canary
on:
failure:
steps:
- script: echo Rollback canary

# Custom implementation
jobs:
- deployment: DeployGreen
environment: production-green
strategy:
runOnce:
deploy:
steps:
- script: echo Deploy to green slot
- job: ValidateGreen
dependsOn: DeployGreen
steps:
- script: echo Run smoke tests
- deployment: SwapSlots
dependsOn: ValidateGreen
environment: production
strategy:
runOnce:
deploy:
steps:
- script: echo Swap blue and green

| Method/Property | Scope | Description | Example |
|---|---|---|---|
| name | Pipeline | Custom build number format | name: $(Date:yyyyMMdd)$(Rev:.r) |
| trigger | Pipeline | CI trigger configuration | trigger: [main, develop] |
| pr | Pipeline | PR trigger configuration | pr: none |
| schedules | Pipeline | Scheduled triggers | schedules: [cron: "0 0 * * *"] |
| resources | Pipeline | External resources | resources.repositories |
| variables | Pipeline/Stage/Job | Variable declarations | variables: [name: var, value: val] |
| stages | Pipeline | Stage definitions | stages: [- stage: Build] |
| jobs | Stage | Job definitions | jobs: [- job: BuildJob] |
| steps | Job | Step definitions | steps: [- script: echo hi] |
| pool | Pipeline/Job | Agent pool selection | pool: {vmImage: ubuntu-latest} |
| lockBehavior | Stage/Job | Resource locking behavior | lockBehavior: sequential |
| workspace | Job | Workspace cleanup | workspace: {clean: all} |
| Method/Property | Description | Example |
|---|---|---|
| job | Standard job identifier | job: BuildJob |
| deployment | Deployment job identifier | deployment: DeployJob |
| displayName | Human-readable name | displayName: 'Build Application' |
| dependsOn | Job dependencies | dependsOn: [BuildJob, TestJob] |
| condition | Execution condition | condition: succeeded() |
| continueOnError | Continue on failure | continueOnError: true |
| timeoutInMinutes | Job timeout | timeoutInMinutes: 120 |
| cancelTimeoutInMinutes | Cancel grace period | cancelTimeoutInMinutes: 5 |
| strategy | Execution strategy | strategy: {matrix: {}, parallel: 2} |
| container | Container specification | container: {image: ubuntu:20.04} |
| services | Service containers | services: {postgres: {image: postgres}} |
| environment | Deployment environment | environment: production |
| Method/Property | Description | Example |
|---|---|---|
| script | Inline script | script: echo Hello |
| bash | Bash script | bash: echo Hello |
| pwsh | PowerShell Core | pwsh: Write-Host Hello |
| powershell | Windows PowerShell | powershell: Write-Host Hello |
| task | Task reference | task: VSBuild@1 |
| checkout | Checkout code | checkout: self |
| download | Download artifacts | download: current |
| downloadBuild | Download from build | downloadBuild: specificBuild |
| publish | Publish artifacts | publish: $(Build.ArtifactStagingDirectory) |
| template | Template reference | template: template.yml |
| displayName | Step display name | displayName: 'Build Step' |
| name | Step identifier | name: buildStep |
| condition | Step condition | condition: succeeded() |
| continueOnError | Continue on error | continueOnError: true |
| enabled | Step enabled state | enabled: true |
| timeoutInMinutes | Step timeout | timeoutInMinutes: 10 |
| env | Environment variables | env: {VAR: value} |
| workingDirectory | Working directory | workingDirectory: $(System.DefaultWorkingDirectory) |
| retryCountOnTaskFailure | Retry count | retryCountOnTaskFailure: 3 |
| Method | Description | Example |
|---|---|---|
| variables | Declare variables | variables: [buildConfig: Release] |
| group | Variable group | - group: MyVariableGroup |
| name/value | Named variable | - name: var, value: val |
| task.setvariable | Set runtime variable | ##vso[task.setvariable variable=x]value |
| isOutput | Output variable flag | ##vso[task.setvariable variable=x;isOutput=true]val |
| isSecret | Secret variable flag | ##vso[task.setvariable variable=x;isSecret=true]val |
| isReadOnly | Read-only variable | ##vso[task.setvariable variable=x;isReadOnly=true]val |
| Resource Type | Description | Example |
|---|---|---|
| repositories | External repositories | resources.repositories: [repo: self] |
| containers | Container images | resources.containers: [container: ubuntu] |
| pipelines | Pipeline resources | resources.pipelines: [pipeline: upstream] |
| builds | Classic build | resources.builds: [build: classic] |
| packages | Package resources | resources.packages: [package: npm] |
| webhooks | Webhook resources | resources.webhooks: [webhook: external] |
resources:
repositories:
- repository: tools
type: git
name: MyProject/Tools
- repository: shared
type: github
endpoint: GitHubConnection
name: org/shared-lib
steps:
- checkout: self
- checkout: tools
- checkout: shared

steps:
- task: AzureCLI@2
inputs:
azureSubscription: 'MyAzureConnection'
scriptType: 'bash'
scriptLocation: 'inlineScript'
inlineScript: |
az account show

# Publish multiple artifacts
steps:
- publish: $(Build.SourcesDirectory)/bin
artifact: binaries
- publish: $(Build.SourcesDirectory)/docs
artifact: documentation
# Download from specific pipeline
- download: upstream
artifact: drop
patterns: '**/*.dll'

strategy:
matrix:
Linux:
imageName: 'ubuntu-latest'
platform: 'linux'
Windows:
imageName: 'windows-latest'
platform: 'windows'
macOS:
imageName: 'macOS-latest'
platform: 'darwin'
maxParallel: 3
pool:
vmImage: $(imageName)
steps:
- script: echo Building for $(platform)

stages:
- stage: Deploy
jobs:
- deployment: DeployProd
environment: production # Configure approvals in environment settings
strategy:
runOnce:
deploy:
steps:
- script: echo Deploying after approval

resources:
containers:
- container: postgres
image: postgres:13
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: password
jobs:
- job: Test
services:
postgres: postgres
steps:
- script: |
psql -h localhost -U postgres -c "SELECT 1"

resources:
containers:
- container: buildenv
image: node:16-alpine
options: --user root
jobs:
- job: Build
container: buildenv
steps:
- script: |
node --version
npm install
npm run build

pool:
name: MyAgentPool
demands:
- Agent.OS -equals Linux
- npm
- node.js
- docker
- java

steps:
- bash: |
echo "##vso[task.setvariable variable=myVar]myValue"
echo "##vso[task.complete result=Succeeded;]Task completed"
echo "##vso[task.logissue type=warning]This is a warning"
echo "##vso[task.logissue type=error]This is an error"
echo "##vso[build.updatebuildnumber]1.0.$(Build.BuildId)"
echo "##vso[build.addbuildtag]production"

variables:
npm_config_cache: $(Pipeline.Workspace)/.npm
steps:
- task: Cache@2
inputs:
key: 'npm | "$(Agent.OS)" | package-lock.json'
restoreKeys: |
npm | "$(Agent.OS)"
path: $(npm_config_cache)
displayName: Cache npm
- script: npm ci

name: $(Date:yyyyMMdd)$(Rev:.r)
trigger:
branches:
include:
- main
- release/*
paths:
exclude:
- docs/*
- README.md
pr:
branches:
include:
- main
variables:
- group: prod-variables
- name: buildConfiguration
value: 'Release'
- name: vmImage
value: 'ubuntu-latest'
stages:
- stage: Build
displayName: 'Build Stage'
jobs:
- job: BuildJob
displayName: 'Build Application'
pool:
vmImage: $(vmImage)
steps:
- checkout: self
clean: true
fetchDepth: 1
- task: UseDotNet@2
displayName: 'Install .NET SDK'
inputs:
packageType: 'sdk'
version: '8.x'
- task: Cache@2
displayName: 'Cache NuGet packages'
inputs:
key: 'nuget | "$(Agent.OS)" | **/packages.lock.json'
path: $(NUGET_PACKAGES)
- script: |
dotnet restore
dotnet build --configuration $(buildConfiguration) --no-restore
dotnet test --configuration $(buildConfiguration) --no-build --logger trx
displayName: 'Restore, Build, and Test'
- task: PublishTestResults@2
condition: succeededOrFailed()
inputs:
testResultsFormat: 'VSTest'
testResultsFiles: '**/*.trx'
- task: DotNetCoreCLI@2
displayName: 'Publish Application'
inputs:
command: 'publish'
publishWebProjects: true
arguments: '--configuration $(buildConfiguration) --output $(Build.ArtifactStagingDirectory)'
- publish: $(Build.ArtifactStagingDirectory)
artifact: drop
displayName: 'Publish Artifacts'
- stage: DeployDev
displayName: 'Deploy to Dev'
dependsOn: Build
condition: succeeded()
jobs:
- deployment: DeployDev
displayName: 'Deploy to Development'
environment: development
pool:
vmImage: $(vmImage)
strategy:
runOnce:
deploy:
steps:
- download: current
artifact: drop
- task: AzureWebApp@1
displayName: 'Deploy to Azure Web App'
inputs:
azureSubscription: 'AzureServiceConnection'
appType: 'webAppLinux'
appName: 'myapp-dev'
package: '$(Pipeline.Workspace)/drop/**/*.zip'
- stage: DeployProd
displayName: 'Deploy to Production'
dependsOn: DeployDev
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/main'))
jobs:
- deployment: DeployProd
displayName: 'Deploy to Production'
environment: production
pool:
vmImage: $(vmImage)
strategy:
canary:
increments: [10, 25, 50, 100]
deploy:
steps:
- download: current
artifact: drop
- task: AzureWebApp@1
displayName: 'Deploy to Azure Web App'
inputs:
azureSubscription: 'AzureServiceConnection'
appType: 'webAppLinux'
appName: 'myapp-prod'
package: '$(Pipeline.Workspace)/drop/**/*.zip'
deploymentMethod: 'zipDeploy'

- Use templates for reusable pipeline components
- Implement caching for dependencies to speed up builds
- Use variable groups for sensitive data and environment-specific values
- Enable parallel execution where possible with matrix strategies
- Implement proper error handling with conditions and continueOnError
- Use deployment jobs for deployments to leverage environment features
- Set timeouts to prevent hanging jobs
- Use output variables to pass data between jobs
- Implement approval gates for production deployments
- Version your pipeline YAML alongside your code
- Use service connections instead of hardcoded credentials
- Implement proper logging with logging commands for debugging
- Use artifacts efficiently by publishing only what’s needed
- Implement health checks in deployment strategies
- Use conditions wisely to control pipeline flow
- Check agent logs for detailed error information
- Verify service connections are properly configured
- Validate YAML syntax using the pipeline editor
- Check variable values with echo statements
- Review conditions that might be preventing execution
- Verify pool availability and agent capabilities
- Check artifact paths and ensure they exist
- Review permissions for environments and service connections
- Use debug mode by setting the system.debug: true variable
- Check for typos in variable names and task inputs
| Service | Auth Method | Prerequisites | Best Practice |
|---|---|---|---|
| Databricks | Personal Access Token (PAT), Service Principal, Azure CLI | Databricks workspace, Token/SP credentials | Use Service Principal for production |
| Snowflake | Username/Password, Key Pair Auth, OAuth | Snowflake account, credentials | Use Key Pair authentication for automation |
| Azure Container Registry | Service Connection, Service Principal, Admin Credentials, Managed Identity | ACR instance, Azure subscription | Service Connection with SP |
| Azure Kubernetes Service | Service Connection, kubeconfig, Service Principal, Managed Identity | AKS cluster, Azure subscription | Service Connection with RBAC |
trigger:
branches:
include:
- main
- develop
paths:
include:
- src/*
exclude:
- docs/*
pool:
vmImage: 'ubuntu-latest'
variables:
- group: my-variable-group
- name: BUILD_CONFIG
value: 'Release'
stages:
- stage: Build
jobs:
- job: BuildJob
steps:
- task: Bash@3
displayName: 'Build Script'
inputs:
targetType: 'inline'
script: |
echo "Building..."

stages:
- stage: Build
displayName: 'Build Stage'
jobs:
- job: BuildJob
steps:
- script: echo "Building"
- stage: Test
displayName: 'Test Stage'
dependsOn: Build
jobs:
- job: TestJob
steps:
- script: echo "Testing"
- stage: Deploy
displayName: 'Deploy Stage'
dependsOn: Test
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/main'))
jobs:
- deployment: DeployJob
environment: 'production'
strategy:
runOnce:
deploy:
steps:
- script: echo "Deploying"

variables:
- group: databricks-secrets # Contains DATABRICKS_TOKEN
steps:
- task: Bash@3
displayName: 'Deploy to Databricks with PAT'
inputs:
targetType: 'inline'
script: |
# Set Databricks host and token
export DATABRICKS_HOST="https://adb-1234567890123456.7.azuredatabricks.net"
export DATABRICKS_TOKEN="$(DATABRICKS_TOKEN)"
# Install Databricks CLI
pip install databricks-cli
# Configure Databricks CLI
echo "[DEFAULT]
host = $DATABRICKS_HOST
token = $DATABRICKS_TOKEN" > ~/.databrickscfg
# List workspaces to verify connection
databricks workspace ls /
# Deploy notebook
databricks workspace import_dir ./notebooks /Shared/cicd-deployed --overwrite

variables:
- group: databricks-sp-secrets
# Variables: DATABRICKS_HOST, ARM_TENANT_ID, ARM_CLIENT_ID, ARM_CLIENT_SECRET
steps:
- task: Bash@3
displayName: 'Databricks SP Authentication'
inputs:
targetType: 'inline'
script: |
# Install required packages
pip install databricks-cli azure-cli
# Get Azure AD token using Service Principal
export DATABRICKS_AAD_TOKEN=$(az account get-access-token \
--resource 2ff814a6-3304-4ab8-85cb-cd0e6f879c1d \
--tenant $(ARM_TENANT_ID) \
--query accessToken -o tsv)
# Get Azure management token
export AZURE_TOKEN=$(az account get-access-token \
--resource https://management.core.windows.net/ \
--tenant $(ARM_TENANT_ID) \
--query accessToken -o tsv)
# Configure Databricks
export DATABRICKS_HOST="$(DATABRICKS_HOST)"
# Create cluster using REST API
curl -X POST "${DATABRICKS_HOST}/api/2.0/clusters/create" \
-H "Authorization: Bearer ${DATABRICKS_AAD_TOKEN}" \
-H "X-Databricks-Azure-SP-Management-Token: ${AZURE_TOKEN}" \
-d '{
"cluster_name": "cicd-cluster",
"spark_version": "11.3.x-scala2.12",
"node_type_id": "Standard_DS3_v2",
"num_workers": 2
}'
env:
ARM_TENANT_ID: $(ARM_TENANT_ID)
ARM_CLIENT_ID: $(ARM_CLIENT_ID)
ARM_CLIENT_SECRET: $(ARM_CLIENT_SECRET)

steps:
- task: Bash@3
displayName: 'Deploy Databricks Job'
inputs:
targetType: 'inline'
script: |
pip install databricks-cli
export DATABRICKS_HOST="$(DATABRICKS_HOST)"
export DATABRICKS_TOKEN="$(DATABRICKS_TOKEN)"
# Upload notebook
databricks workspace import \
./notebooks/etl_pipeline.py \
/Shared/Jobs/etl_pipeline \
--language PYTHON \
--overwrite
# Create or update job
JOB_ID=$(databricks jobs list --output JSON | \
jq -r '.jobs[] | select(.settings.name=="ETL Pipeline") | .job_id')
if [ -z "$JOB_ID" ]; then
# Create new job
databricks jobs create --json '{
"name": "ETL Pipeline",
"max_concurrent_runs": 1,
"tasks": [{
"task_key": "etl_task",
"notebook_task": {
"notebook_path": "/Shared/Jobs/etl_pipeline",
"base_parameters": {}
},
"new_cluster": {
"spark_version": "11.3.x-scala2.12",
"node_type_id": "Standard_DS3_v2",
"num_workers": 2
}
}]
}'
else
# Update existing job
databricks jobs reset --job-id $JOB_ID --json-file job-config.json
fi

variables:
- group: snowflake-secrets
# Variables: SNOWFLAKE_ACCOUNT, SNOWFLAKE_USER, SNOWFLAKE_PASSWORD, SNOWFLAKE_DATABASE
steps:
- task: Bash@3
displayName: 'Connect to Snowflake with Password'
inputs:
targetType: 'inline'
script: |
# Install Snowflake connector
pip install snowflake-connector-python
# Create Python script for connection
cat > snowflake_deploy.py << 'EOF'
import snowflake.connector
import os
conn = snowflake.connector.connect(
account=os.environ['SNOWFLAKE_ACCOUNT'],
user=os.environ['SNOWFLAKE_USER'],
password=os.environ['SNOWFLAKE_PASSWORD'],
warehouse='COMPUTE_WH',
database=os.environ['SNOWFLAKE_DATABASE'],
schema='PUBLIC'
)
cursor = conn.cursor()
# Execute SQL
cursor.execute("SELECT CURRENT_VERSION()")
print(f"Snowflake version: {cursor.fetchone()[0]}")
# Deploy stored procedure
with open('procedures/my_procedure.sql', 'r') as f:
sql = f.read()
cursor.execute(sql)
cursor.close()
conn.close()
EOF
python snowflake_deploy.py
env:
SNOWFLAKE_ACCOUNT: $(SNOWFLAKE_ACCOUNT)
SNOWFLAKE_USER: $(SNOWFLAKE_USER)
SNOWFLAKE_PASSWORD: $(SNOWFLAKE_PASSWORD)
SNOWFLAKE_DATABASE: $(SNOWFLAKE_DATABASE)

variables:
- group: snowflake-keypair
# Variables: SNOWFLAKE_ACCOUNT, SNOWFLAKE_USER, SNOWFLAKE_PRIVATE_KEY
steps:
- task: Bash@3
displayName: 'Snowflake Key Pair Auth'
inputs:
targetType: 'inline'
script: |
pip install snowflake-connector-python cryptography
# Decode base64 private key from variable
echo "$(SNOWFLAKE_PRIVATE_KEY)" | base64 -d > private_key.pem
cat > snowflake_connect.py << 'EOF'
import snowflake.connector
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import os
# Load private key
with open("private_key.pem", "rb") as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend=default_backend()
)
pkb = private_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
)
conn = snowflake.connector.connect(
account=os.environ['SNOWFLAKE_ACCOUNT'],
user=os.environ['SNOWFLAKE_USER'],
private_key=pkb
)
cursor = conn.cursor()
cursor.execute("SELECT CURRENT_USER(), CURRENT_ROLE()")
print(f"Connected as: {cursor.fetchone()}")
# Run migrations
cursor.execute("USE DATABASE MY_DB")
cursor.execute("USE SCHEMA MY_SCHEMA")
# Execute DDL
with open('migrations/001_create_tables.sql', 'r') as f:
for statement in f.read().split(';'):
if statement.strip():
cursor.execute(statement)
cursor.close()
conn.close()
EOF
python snowflake_connect.py
rm -f private_key.pem
env:
SNOWFLAKE_ACCOUNT: $(SNOWFLAKE_ACCOUNT)
SNOWFLAKE_USER: $(SNOWFLAKE_USER)

steps:
- task: Bash@3
displayName: 'Deploy with SnowSQL'
inputs:
targetType: 'inline'
script: |
# Download and install SnowSQL
curl -O https://sfc-repo.snowflakecomputing.com/snowsql/bootstrap/1.2/linux_x86_64/snowsql-1.2.28-linux_x86_64.bash
SNOWSQL_DEST=~/bin SNOWSQL_LOGIN_SHELL=~/.profile bash snowsql-1.2.28-linux_x86_64.bash
# Configure SnowSQL
mkdir -p ~/.snowsql
cat > ~/.snowsql/config << EOF
[connections.cicd]
accountname = $(SNOWFLAKE_ACCOUNT)
username = $(SNOWFLAKE_USER)
password = $(SNOWFLAKE_PASSWORD)
warehousename = COMPUTE_WH
databasename = $(SNOWFLAKE_DATABASE)
schemaname = PUBLIC
EOF
# Execute SQL scripts
~/bin/snowsql -c cicd -f migrations/deploy.sql -o output_format=json -o friendly=false
# Execute with variables
~/bin/snowsql -c cicd -D environment=production -f deploy.sql
env:
SNOWFLAKE_ACCOUNT: $(SNOWFLAKE_ACCOUNT)
SNOWFLAKE_USER: $(SNOWFLAKE_USER)
SNOWFLAKE_PASSWORD: $(SNOWFLAKE_PASSWORD)
SNOWFLAKE_DATABASE: $(SNOWFLAKE_DATABASE)

resources:
repositories:
- repository: self
variables:
dockerRegistryServiceConnection: 'my-acr-service-connection'
imageRepository: 'myapp'
containerRegistry: 'myacr.azurecr.io'
dockerfilePath: '$(Build.SourcesDirectory)/Dockerfile'
tag: '$(Build.BuildId)'
steps:
- task: Docker@2
displayName: 'Build and Push to ACR'
inputs:
command: buildAndPush
repository: $(imageRepository)
dockerfile: $(dockerfilePath)
containerRegistry: $(dockerRegistryServiceConnection)
tags: |
$(tag)
latest

variables:
- group: acr-credentials
# Variables: ACR_NAME, ACR_SP_ID, ACR_SP_PASSWORD
steps:
- task: Bash@3
displayName: 'Build and Push with Docker'
inputs:
targetType: 'inline'
script: |
# Login to ACR using Service Principal
echo "$(ACR_SP_PASSWORD)" | docker login $(ACR_NAME).azurecr.io \
--username $(ACR_SP_ID) \
--password-stdin
# Build image
docker build -t $(ACR_NAME).azurecr.io/myapp:$(Build.BuildId) .
docker tag $(ACR_NAME).azurecr.io/myapp:$(Build.BuildId) $(ACR_NAME).azurecr.io/myapp:latest
# Push to ACR
docker push $(ACR_NAME).azurecr.io/myapp:$(Build.BuildId)
docker push $(ACR_NAME).azurecr.io/myapp:latest
# Logout
docker logout $(ACR_NAME).azurecr.io

steps:
- task: AzureCLI@2
displayName: 'Build and Push with Azure CLI'
inputs:
azureSubscription: 'my-azure-subscription'
scriptType: 'bash'
scriptLocation: 'inlineScript'
inlineScript: |
# ACR login using Azure CLI
az acr login --name $(ACR_NAME)
# Build with ACR Tasks (serverless build)
az acr build \
--registry $(ACR_NAME) \
--image myapp:$(Build.BuildId) \
--image myapp:latest \
--file Dockerfile \
.
# List images
az acr repository show-tags \
--name $(ACR_NAME) \
--repository myapp \
--output table

steps:
- task: Bash@3
displayName: 'Build with Layer Caching'
inputs:
targetType: 'inline'
script: |
echo "$(ACR_SP_PASSWORD)" | docker login $(ACR_NAME).azurecr.io \
--username $(ACR_SP_ID) \
--password-stdin
# Pull previous image for caching
docker pull $(ACR_NAME).azurecr.io/myapp:latest || true
# Build with cache
docker build \
--cache-from $(ACR_NAME).azurecr.io/myapp:latest \
--build-arg BUILDKIT_INLINE_CACHE=1 \
-t $(ACR_NAME).azurecr.io/myapp:$(Build.BuildId) \
-t $(ACR_NAME).azurecr.io/myapp:latest \
.
# Push both tags
docker push $(ACR_NAME).azurecr.io/myapp:$(Build.BuildId)
docker push $(ACR_NAME).azurecr.io/myapp:latest

variables:
k8sServiceConnection: 'my-aks-service-connection'
aksResourceGroup: 'my-rg'
aksClusterName: 'my-aks-cluster'
steps:
- task: KubernetesManifest@0
displayName: 'Deploy to AKS'
inputs:
action: 'deploy'
kubernetesServiceConnection: $(k8sServiceConnection)
namespace: 'production'
manifests: |
$(Pipeline.Workspace)/manifests/deployment.yaml
$(Pipeline.Workspace)/manifests/service.yaml
containers: |
$(containerRegistry)/$(imageRepository):$(tag)

steps:
- task: AzureCLI@2
displayName: 'Deploy to AKS with kubectl'
inputs:
azureSubscription: 'my-azure-subscription'
scriptType: 'bash'
scriptLocation: 'inlineScript'
inlineScript: |
# Get AKS credentials
az aks get-credentials \
--resource-group $(aksResourceGroup) \
--name $(aksClusterName) \
--overwrite-existing
# Verify connection
kubectl cluster-info
kubectl get nodes
# Create namespace if not exists
kubectl create namespace production --dry-run=client -o yaml | kubectl apply -f -
# Deploy application
kubectl apply -f k8s/deployment.yaml -n production
kubectl apply -f k8s/service.yaml -n production
# Wait for rollout
kubectl rollout status deployment/myapp -n production --timeout=5m
# Get deployment status
kubectl get pods -n production -l app=myapp

steps:
- task: AzureCLI@2
displayName: 'Deploy with Helm'
inputs:
azureSubscription: 'my-azure-subscription'
scriptType: 'bash'
scriptLocation: 'inlineScript'
inlineScript: |
# Get AKS credentials
az aks get-credentials \
--resource-group $(aksResourceGroup) \
--name $(aksClusterName)
# Install/upgrade Helm chart
helm upgrade --install myapp ./helm-chart \
--namespace production \
--create-namespace \
--set image.repository=$(containerRegistry)/$(imageRepository) \
--set image.tag=$(tag) \
--set ingress.enabled=true \
--set ingress.hosts[0].host=myapp.example.com \
--wait \
--timeout 10m
# Get release status
helm list -n production
helm status myapp -n production

trigger:
branches:
include:
- main
variables:
- group: azure-credentials
dockerRegistryServiceConnection: 'acr-connection'
k8sServiceConnection: 'aks-connection'
imageRepository: 'myapp'
containerRegistry: 'myacr.azurecr.io'
tag: '$(Build.BuildId)'
aksResourceGroup: 'my-rg'
aksClusterName: 'my-aks'
stages:
- stage: Build
displayName: 'Build and Push'
jobs:
- job: Build
pool:
vmImage: 'ubuntu-latest'
steps:
- task: Docker@2
displayName: 'Build and Push Image'
inputs:
command: buildAndPush
repository: $(imageRepository)
dockerfile: 'Dockerfile'
containerRegistry: $(dockerRegistryServiceConnection)
tags: |
$(tag)
latest
- task: PublishPipelineArtifact@1
inputs:
targetPath: '$(Pipeline.Workspace)/s/k8s'
artifact: 'manifests'
- stage: Deploy
displayName: 'Deploy to AKS'
dependsOn: Build
jobs:
- deployment: Deploy
pool:
vmImage: 'ubuntu-latest'
environment: 'production'
strategy:
runOnce:
deploy:
steps:
- task: DownloadPipelineArtifact@2
inputs:
artifact: 'manifests'
path: '$(Pipeline.Workspace)/manifests'
- task: AzureCLI@2
displayName: 'Deploy to AKS'
inputs:
azureSubscription: 'my-azure-subscription'
scriptType: 'bash'
scriptLocation: 'inlineScript'
inlineScript: |
# Get credentials
az aks get-credentials \
--resource-group $(aksResourceGroup) \
--name $(aksClusterName)
# Update deployment image
kubectl set image deployment/myapp \
myapp=$(containerRegistry)/$(imageRepository):$(tag) \
-n production
# Wait for rollout
kubectl rollout status deployment/myapp -n production
# Verify deployment
kubectl get pods -n production -l app=myapp

steps:
# Blue-green deployment: deploy to the inactive color, smoke-test it,
# then flip the service selector to switch traffic.
- task: AzureCLI@2
  displayName: 'Blue-Green Deployment'
  inputs:
    azureSubscription: 'my-azure-subscription'
    scriptType: 'bash'
    scriptLocation: 'inlineScript'
    inlineScript: |
      az aks get-credentials \
        --resource-group $(aksResourceGroup) \
        --name $(aksClusterName)
      # Determine current active color (defaults to "blue" on first run)
      CURRENT_COLOR=$(kubectl get service myapp -n production -o jsonpath='{.spec.selector.color}' || echo "blue")
      if [ "$CURRENT_COLOR" == "blue" ]; then
        NEW_COLOR="green"
      else
        NEW_COLOR="blue"
      fi
      echo "Current: $CURRENT_COLOR, Deploying to: $NEW_COLOR"
      # Deploy to new color
      cat > deployment-$NEW_COLOR.yaml << EOF
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: myapp-$NEW_COLOR
        namespace: production
      spec:
        replicas: 3
        selector:
          matchLabels:
            app: myapp
            color: $NEW_COLOR
        template:
          metadata:
            labels:
              app: myapp
              color: $NEW_COLOR
          spec:
            containers:
            - name: myapp
              image: $(containerRegistry)/$(imageRepository):$(tag)
              ports:
              - containerPort: 8080
      EOF
      kubectl apply -f deployment-$NEW_COLOR.yaml
      kubectl rollout status deployment/myapp-$NEW_COLOR -n production
      # Run smoke tests
      kubectl run smoke-test --rm -i --restart=Never \
        --image=curlimages/curl -- \
        curl -f http://myapp-$NEW_COLOR.production.svc.cluster.local:8080/health
      # Switch traffic
      kubectl patch service myapp -n production -p "{\"spec\":{\"selector\":{\"color\":\"$NEW_COLOR\"}}}"
      echo "Switched traffic to $NEW_COLOR"
      # Optional: Scale down old deployment
      sleep 30
      kubectl scale deployment/myapp-$CURRENT_COLOR -n production --replicas=0

steps:
# Step-level conditions: gate execution on branch or build reason.
- task: Bash@3
  displayName: 'Production Only Step'
  # Only on a successful run of the main branch
  condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/main'))
  inputs:
    targetType: 'inline'
    script: echo "Deploying to production"
- task: Bash@3
  displayName: 'Run on PR Only'
  condition: eq(variables['Build.Reason'], 'PullRequest')
  inputs:
    targetType: 'inline'
    script: echo "Running PR checks"

steps:
- task: Bash@3
  displayName: 'Script with Error Handling'
  inputs:
    targetType: 'inline'
    script: |
      set -e           # Exit on error
      set -o pipefail  # Catch errors in pipes
      function cleanup {
        echo "Cleaning up..."
        rm -f temp_file.txt
      }
      trap cleanup EXIT
      # Your script here
      if ! command -v docker &> /dev/null; then
        echo "Docker not found!"
        exit 1
      fi
      # Continue with script
      echo "Script completed successfully"

steps:
- task: Bash@3
  displayName: 'Deploy with Retry'
  inputs:
    targetType: 'inline'
    script: |
      MAX_RETRIES=3
      RETRY_COUNT=0
      until [ $RETRY_COUNT -ge $MAX_RETRIES ]
      do
        echo "Attempt $((RETRY_COUNT+1)) of $MAX_RETRIES"
        if kubectl apply -f deployment.yaml; then
          echo "Deployment successful"
          break
        fi
        RETRY_COUNT=$((RETRY_COUNT+1))
        if [ $RETRY_COUNT -lt $MAX_RETRIES ]; then
          echo "Deployment failed, retrying in 10 seconds..."
          sleep 10
        else
          echo "Max retries reached, deployment failed"
          exit 1
        fi
      done

jobs:
# Parallel slicing: run N identical copies of the job; each copy can
# use $(System.JobPositionInPhase) to pick its slice of the work.
- job: ParallelTests
  strategy:
    parallel: 3
  steps:
  - bash: echo "Running test suite $(System.JobPositionInPhase)"
# Matrix: one job per entry, each with its own variable values.
- job: MatrixBuild
  strategy:
    matrix:
      Python38:
        python.version: '3.8'
      Python39:
        python.version: '3.9'
      Python310:
        python.version: '3.10'
  steps:
  - bash: |
      python --version
      echo "Testing with Python $(python.version)"

steps:
# Cache@2 restores on an exact key hit, falling back through restoreKeys
# (most to least specific) on a miss.
- task: Cache@2
  displayName: 'Cache pip packages'
  inputs:
    key: 'python | "$(Agent.OS)" | requirements.txt'
    restoreKeys: |
      python | "$(Agent.OS)"
      python
    path: $(PIP_CACHE_DIR)
- task: Bash@3
  displayName: 'Install Dependencies'
  inputs:
    targetType: 'inline'
    script: |
      pip install -r requirements.txt

steps:
# Secret variables are not exposed to scripts automatically; map them
# explicitly via env: and never echo their values.
- task: Bash@3
  displayName: 'Use Secrets Safely'
  inputs:
    targetType: 'inline'
    script: |
      # Never echo secrets
      # Use env variables instead of inline
      echo "Connecting to database..."
      # Good: Use variable without revealing
      python deploy.py
      # Bad: echo "Password is $(SECRET_PASSWORD)"
  env:
    DB_PASSWORD: $(SECRET_PASSWORD)
    API_KEY: $(API_KEY)

variables:
# Mix variable groups (shared across pipelines) with local variables.
- group: common-variables
- group: production-secrets
- name: LOCAL_VAR
  value: 'local-value'

steps:
- bash: |
    echo "Using variable from group: $(COMMON_VAR)"
    echo "Local variable: $(LOCAL_VAR)"

steps:
# ##vso[task.setvariable] makes a value available to SUBSEQUENT steps
# (not to the step that sets it).
- bash: |
    # Set variable for subsequent steps
    echo "##vso[task.setvariable variable=DYNAMIC_VALUE]generated-value"
    echo "##vso[task.setvariable variable=IMAGE_TAG]$(Build.BuildId)"
- bash: |
    echo "Using dynamic variable: $(DYNAMIC_VALUE)"
    echo "Image tag: $(IMAGE_TAG)"

steps:
# AzureKeyVault@2 fetches the filtered secrets and exposes them as
# (secret) pipeline variables; RunAsPreJob makes them available to
# every step in the job.
- task: AzureKeyVault@2
  inputs:
    azureSubscription: 'my-azure-subscription'
    KeyVaultName: 'my-keyvault'
    SecretsFilter: 'DatabasePassword,ApiKey,CertificateSecret'
    RunAsPreJob: true
- bash: |
    echo "Retrieved secrets from Key Vault"
    # Secrets now available as pipeline variables
    python app.py
  env:
    DB_PASSWORD: $(DatabasePassword)
    API_KEY: $(ApiKey)

variables:
# Compile-time (template expression) conditionals: evaluated when the
# run starts, selecting which variable groups/values are included.
- ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/main') }}:
  - group: production-variables
  - name: ENVIRONMENT
    value: 'production'
- ${{ if ne(variables['Build.SourceBranch'], 'refs/heads/main') }}:
  - group: staging-variables
  - name: ENVIRONMENT
    value: 'staging'

steps:
- bash: |
    echo "Deploying to: $(ENVIRONMENT)"
    echo "Database: $(DATABASE_URL)"