diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index b174007..0000000 Binary files a/.DS_Store and /dev/null differ diff --git a/.github/workflows/aks.yml b/.github/workflows/aks.yml index cb39c44..dabda72 100644 --- a/.github/workflows/aks.yml +++ b/.github/workflows/aks.yml @@ -1,4 +1,5 @@ on: + workflow_dispatch: push: branches: [ main ] pull_request: @@ -58,12 +59,22 @@ jobs: - name: Update Registry URL run: | sed -i.bak 's/pcgithub.azurecr.io/${{ secrets.REGISTRY_LOGIN_SERVER }}/' aks-deployment.yml + - name: Set up deployment environment variables + run: | + kustomize create --resources aks-deployment.yml,aks-serviceaccount.yaml + kustomize edit add patch --kind Deployment --patch '[{"op":"add","path":"/spec/template/spec/containers/0/env","value":[{"name":"BLOB_ENDPOINT","value":"'"${{secrets.BLOB_ENDPOINT}}"'"},{"name":"CONTAINER_NAME","value":"'"${{secrets.CONTAINER_NAME}}"'"}]}]' --group apps + if [ -n "${{ secrets.AWI_CLIENTID }}" ] + then + kustomize edit add patch --kind ServiceAccount --patch '[{"op":"add","path":"/metadata/annotations","value":{"azure.workload.identity/client-id":"'"${{secrets.AWI_CLIENTID}}"'"}}]' + kustomize edit add patch --kind Deployment --patch '[{"op":"add","path":"/spec/template/spec/serviceAccountName","value":"workload-identity-sa"}]' --group apps + fi + kustomize build . 
-o aks-deployment-kustomized.yml - name: Deploy to AKS id: deploy-aks uses: Azure/k8s-deploy@v4 with: namespace: 'default' manifests: | - aks-deployment.yml + aks-deployment-kustomized.yml images: '${{ secrets.REGISTRY_LOGIN_SERVER }}/pythonserver:${{ github.sha }}' annotate-namespace: 'false' diff --git a/.github/workflows/ec2.yml b/.github/workflows/ec2.yml index e879f3f..006132f 100644 --- a/.github/workflows/ec2.yml +++ b/.github/workflows/ec2.yml @@ -1,5 +1,6 @@ name: EC2 deployment on: + workflow_dispatch: push: branches: [ main ] paths: diff --git a/.github/workflows/eks.yml b/.github/workflows/eks.yml index 02b331d..cb5e079 100644 --- a/.github/workflows/eks.yml +++ b/.github/workflows/eks.yml @@ -1,4 +1,5 @@ on: + workflow_dispatch: push: branches: [ main ] pull_request: @@ -43,7 +44,7 @@ jobs: ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }} ECR_REPOSITORY: ${{ secrets.REPO_NAME }} IMAGE_TAG: ${{ github.sha }} - uses: PaloAltoNetworks/prisma-cloud-scan@v1 + uses: PaloAltoNetworks/prisma-cloud-scan@v1.4.0 with: pcc_console_url: ${{ secrets.PCC_CONSOLE_URL }} pcc_user: ${{ secrets.PCC_USER }} @@ -76,6 +77,7 @@ jobs: run: |- kustomize create --resources eks-deployment.yml kustomize edit set image "demo-image-name=${{ steps.login-ecr.outputs.registry }}/${{ secrets.REPO_NAME }}:${{ github.sha }}" + kustomize edit add patch --patch '[{"op":"add","path":"/spec/template/spec/containers/0/env","value":[{"name":"S3_BUCKET_NAME","value":"'"${{secrets.S3_BUCKET_NAME}}"'"}]}]' --group apps cat kustomization.yaml kustomize build . 
| kubectl apply -f - kubectl get pods -o wide diff --git a/.github/workflows/gcp-cloudrun.yml b/.github/workflows/gcp-cloudrun.yml index d3ced40..7df86ff 100644 --- a/.github/workflows/gcp-cloudrun.yml +++ b/.github/workflows/gcp-cloudrun.yml @@ -1,4 +1,5 @@ on: + workflow_dispatch: push: branches: [ main ] paths: diff --git a/.github/workflows/gke.yml b/.github/workflows/gke.yml index 21819ea..120cf3b 100644 --- a/.github/workflows/gke.yml +++ b/.github/workflows/gke.yml @@ -1,4 +1,5 @@ on: + workflow_dispatch: push: branches: [ main ] pull_request: diff --git a/.github/workflows/lambda.yml b/.github/workflows/lambda.yml index e293c69..bdaa7dd 100644 --- a/.github/workflows/lambda.yml +++ b/.github/workflows/lambda.yml @@ -1,4 +1,5 @@ on: + workflow_dispatch: push: branches: [ main ] paths: diff --git a/.gitignore b/.gitignore index ef7a1c7..10dfee0 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .external_modules/ +.DS_Store diff --git a/IaC/AKS/aks.tf b/IaC/AKS/aks.tf index d774938..0325eae 100755 --- a/IaC/AKS/aks.tf +++ b/IaC/AKS/aks.tf @@ -4,15 +4,13 @@ resource "random_string" "password" { special = false } -provider "azurerm" { - #version = "=1.38.0" +data "azurerm_subscription" "configured" { subscription_id = var.subscription_id - tenant_id = var.tenant_id - features {} } -data "azurerm_subscription" "configured" { - subscription_id = var.subscription_id +data "azurerm_kubernetes_cluster" "aks_cluster" { + name = azurerm_kubernetes_cluster.aks_cluster.name + resource_group_name = azurerm_kubernetes_cluster.aks_cluster.resource_group_name } resource "azurerm_kubernetes_cluster" "aks_cluster" { @@ -20,10 +18,16 @@ resource "azurerm_kubernetes_cluster" "aks_cluster" { # checkov:skip=CKV_AZURE_117: Disks are already encrypted at rest with Azure manged keys, which is sufficient for a demo cluster # checkov:skip=CKV_AZURE_4: No need for cluster telemetry (performance/availability) for a demo cluster # checkov:skip=CKV_AZURE_6: Cannot use trusted networks 
because of 200 IP limit with GH actions - name = "${var.prefix}-${random_string.password.result}" - location = var.region - resource_group_name = var.create_requirements ? azurerm_resource_group.rg[0].name : var.resource_group_name - dns_prefix = "${var.prefix}-${random_string.password.result}" + name = "${var.prefix}-${random_string.password.result}" + location = var.region + resource_group_name = var.create_requirements ? azurerm_resource_group.rg[0].name : var.resource_group_name + dns_prefix = "${var.prefix}-${random_string.password.result}" + oidc_issuer_enabled = true + workload_identity_enabled = true + automatic_channel_upgrade = "stable" + depends_on = [ + azurerm_resource_provider_registration.ewip + ] default_node_pool { name = "poolone" @@ -62,6 +66,30 @@ resource "azurerm_kubernetes_cluster" "aks_cluster" { } } +resource "azurerm_resource_provider_registration" "ewip" { + name = "Microsoft.ContainerService" + + feature { + name = "EnableWorkloadIdentityPreview" + registered = true + } +} + +resource "azurerm_user_assigned_identity" "cnappdemo" { + name = "${var.prefix}-wlid" + resource_group_name = var.create_requirements ? 
azurerm_resource_group.rg[0].name : var.resource_group_name + location = var.region +} + +resource "azurerm_federated_identity_credential" "cnappdemo" { + name = "${var.prefix}-wlid" + resource_group_name = azurerm_user_assigned_identity.cnappdemo.resource_group_name + issuer = azurerm_kubernetes_cluster.aks_cluster.oidc_issuer_url + parent_id = azurerm_user_assigned_identity.cnappdemo.id + audience = ["api://AzureADTokenExchange"] + subject = "system:serviceaccount:default:workload-identity-sa" +} + resource "azurerm_role_assignment" "admin_aks_rbac" { role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin" principal_id = data.azuread_client_config.current.object_id diff --git a/IaC/AKS/argocd.tf b/IaC/AKS/argocd.tf index cfbc142..898af3d 100644 --- a/IaC/AKS/argocd.tf +++ b/IaC/AKS/argocd.tf @@ -1,3 +1,5 @@ +data "azurerm_subscription" "current" {} + resource "null_resource" "run_provisioner" { count = var.run_provisioner ? 1 : 0 provisioner "local-exec" { @@ -5,6 +7,7 @@ resource "null_resource" "run_provisioner" { CSP = "AZURE", AZURE_AKS_NAME = azurerm_kubernetes_cluster.aks_cluster.name, AZURE_RG = azurerm_kubernetes_cluster.aks_cluster.resource_group_name, + AZURE_SUB_ID = data.azurerm_subscription.current.subscription_id, ARGOCD_GITOPS_REPOSITORY = var.argocd_git_repo, GITHUB_TOKEN = var.gh_token, PCC_USER = var.pcc_user, diff --git a/IaC/AKS/gh.tf b/IaC/AKS/gh.tf index 91bee63..aaec59d 100644 --- a/IaC/AKS/gh.tf +++ b/IaC/AKS/gh.tf @@ -56,3 +56,27 @@ resource "github_actions_secret" "cluster_name" { # checkov:skip=CKV_SECRET_6: not a secret plaintext_value = azurerm_kubernetes_cluster.aks_cluster.name } + +resource "github_actions_secret" "blob_endpoint" { + count = var.create_requirements && var.create_storage ? 
1 : 0 + repository = var.gh_repo + secret_name = "BLOB_ENDPOINT" + # checkov:skip=CKV_SECRET_6: not a secret + plaintext_value = azurerm_storage_account.storage_acc[0].primary_blob_endpoint +} + +resource "github_actions_secret" "container_name" { + count = var.create_requirements && var.create_storage ? 1 : 0 + repository = var.gh_repo + secret_name = "CONTAINER_NAME" + # checkov:skip=CKV_SECRET_6: not a secret + plaintext_value = azurerm_storage_container.storage_container[0].name +} + +resource "github_actions_secret" "awi_clientid" { + count = var.create_requirements ? 1 : 0 + repository = var.gh_repo + secret_name = "AWI_CLIENTID" + # checkov:skip=CKV_SECRET_6: not a secret + plaintext_value = azurerm_user_assigned_identity.cnappdemo.client_id +} diff --git a/IaC/AKS/main.tf b/IaC/AKS/main.tf new file mode 100644 index 0000000..df39680 --- /dev/null +++ b/IaC/AKS/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>3.31.0" + } + } +} + +provider "azurerm" { + subscription_id = var.subscription_id + tenant_id = var.tenant_id + features {} + skip_provider_registration = true +} diff --git a/IaC/AKS/storage.tf b/IaC/AKS/storage.tf new file mode 100644 index 0000000..848acfe --- /dev/null +++ b/IaC/AKS/storage.tf @@ -0,0 +1,36 @@ +resource "azurerm_storage_account" "storage_acc" { + # checkov:skip=CKV_AZURE_59: Public network access required for demo bucket + # checkov:skip=CKV2_AZURE_18: No CMK needed for demo bucket + # checkov:skip=CKV2_AZURE_1: No CMK needed for demo bucket + # checkov:skip=CKV_AZURE_33: No logging needed for demo bucket + count = var.create_storage ? 1 : 0 + name = "${random_string.password.result}cnappdemo" + resource_group_name = var.create_requirements ? 
azurerm_resource_group.rg[0].name : var.resource_group_name + account_replication_type = "LRS" + location = var.region + account_tier = "Standard" + min_tls_version = "TLS1_2" +} + +resource "azurerm_storage_container" "storage_container" { + # checkov:skip=CKV_AZURE_34: Public access needed for demo purposes + # checkov:skip=CKV2_AZURE_21: No logging needed for demo bucket + count = var.create_storage ? 1 : 0 + name = "${random_string.password.result}cnappdemo" + container_access_type = "blob" + storage_account_name = azurerm_storage_account.storage_acc[0].name +} + +resource "azurerm_role_assignment" "storage_blob_data_owner_user" { + count = var.create_storage ? 1 : 0 + role_definition_name = "Storage Blob Data Owner" + principal_id = data.azuread_client_config.current.object_id + scope = azurerm_storage_account.storage_acc[0].id +} + +resource "azurerm_role_assignment" "storage_blob_data_owner_pod" { + count = var.create_storage ? 1 : 0 + role_definition_name = "Storage Blob Data Owner" + principal_id = azurerm_user_assigned_identity.cnappdemo.principal_id + scope = azurerm_storage_account.storage_acc[0].id +} diff --git a/IaC/AKS/variables.tf b/IaC/AKS/variables.tf index 3ba8a97..13ed560 100755 --- a/IaC/AKS/variables.tf +++ b/IaC/AKS/variables.tf @@ -50,6 +50,12 @@ variable "create_acr" { default = false } +variable "create_storage" { + type = bool + description = "Create Storage account instead of using names from vars." 
+ default = false +} + variable "acr_tags" { + type = map(any) + default = null diff --git a/IaC/EKS/argocd.tf b/IaC/EKS/argocd.tf index 3c64026..d118183 100644 --- a/IaC/EKS/argocd.tf +++ b/IaC/EKS/argocd.tf @@ -4,6 +4,7 @@ resource "null_resource" "run_provisioner" { environment = { CSP = "AWS", AWS_EKS_NAME = module.eks.cluster_id, + AWS_EKS_VERSION = module.eks.cluster_version, ARGOCD_GITOPS_REPOSITORY = var.argocd_git_repo, GITHUB_TOKEN = var.gh_token, PCC_USER = var.pcc_user, diff --git a/IaC/EKS/eks.tf b/IaC/EKS/eks.tf index f3cdbf5..1efc579 100644 --- a/IaC/EKS/eks.tf +++ b/IaC/EKS/eks.tf @@ -18,7 +18,7 @@ module "eks" { version = "~> 18.0" cluster_name = local.cluster_name_full - cluster_version = "1.23" + cluster_version = var.cluster_version cluster_endpoint_private_access = true cluster_endpoint_public_access = true @@ -214,6 +214,7 @@ resource "helm_release" "aws_load_balancer_controller" { name = "aws-load-balancer-controller" chart = "aws-load-balancer-controller" repository = "https://aws.github.io/eks-charts" + version = "1.5.3" namespace = "kube-system" diff --git a/IaC/EKS/gh.tf b/IaC/EKS/gh.tf index e8fd581..d6917ce 100644 --- a/IaC/EKS/gh.tf +++ b/IaC/EKS/gh.tf @@ -17,3 +17,10 @@ resource "github_actions_secret" "ecr_repo_name" { secret_name = "REPO_NAME" plaintext_value = var.ecr_repo_name } + +resource "github_actions_secret" "s3_bucket_name" { + repository = var.gh_repo + secret_name = "S3_BUCKET_NAME" + # checkov:skip=CKV_SECRET_6: not a secret + plaintext_value = var.s3_bucket_name +} diff --git a/IaC/EKS/s3.tf b/IaC/EKS/s3.tf new file mode 100644 index 0000000..99ee711 --- /dev/null +++ b/IaC/EKS/s3.tf @@ -0,0 +1,54 @@ +resource "aws_s3_bucket" "s3b" { + # checkov:skip=CKV2_AWS_6: Public access blocks not needed for a demo bucket + # checkov:skip=CKV_AWS_145: KMS not needed for a demo bucket + # checkov:skip=CKV_AWS_18: Access logging not needed for a demo bucket + # checkov:skip=CKV_AWS_144: Region cross-replication not needed for a demo
bucket + # checkov:skip=CKV_AWS_21: Versioning not needed for a demo bucket + bucket = var.s3_bucket_name +} + +resource "aws_s3_bucket_public_access_block" "pab" { + # checkov:skip=CKV_AWS_55: Public access ACL needed for demo purposes. + # checkov:skip=CKV_AWS_54: Public access ACL needed for demo purposes. + # checkov:skip=CKV_AWS_53: Public access ACL needed for demo purposes. + bucket = aws_s3_bucket.s3b.bucket + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = true +} + +resource "aws_s3_bucket_policy" "allow_upload" { + bucket = aws_s3_bucket.s3b.bucket + policy = data.aws_iam_policy_document.allow_upload.json +} + +data "aws_iam_policy_document" "allow_upload" { + statement { + actions = [ + "s3:AbortMultipartUpload", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts", + "s3:PutObject", + "s3:PutObjectAcl" + ] + principals { + type = "AWS" + identifiers = [module.eks.eks_managed_node_groups.one.iam_role_arn] + } + resources = [ + aws_s3_bucket.s3b.arn, + "${aws_s3_bucket.s3b.arn}/*" + ] + } +} + +resource "aws_s3_bucket_ownership_controls" "object_writer" { + bucket = aws_s3_bucket.s3b.bucket + + rule { + object_ownership = "ObjectWriter" + } +} diff --git a/IaC/EKS/variables.tf b/IaC/EKS/variables.tf index 726458f..f0006b1 100644 --- a/IaC/EKS/variables.tf +++ b/IaC/EKS/variables.tf @@ -65,3 +65,12 @@ variable "pcc_console" { type = string default = null } + +variable "s3_bucket_name" { + type = string +} + +variable "cluster_version" { + type = string + default = "1.23" +} diff --git a/IaC/GKE/requirements.tf b/IaC/GKE/requirements.tf index fa30575..212af28 100644 --- a/IaC/GKE/requirements.tf +++ b/IaC/GKE/requirements.tf @@ -15,6 +15,20 @@ resource "google_compute_subnetwork" "subnet" { } } +resource "google_compute_router" "router" { + count = var.create_requirements ? 
1 : 0 + name = "${var.prefix}-router" + network = google_compute_network.vpc[0].id +} + +resource "google_compute_router_nat" "nat" { + count = var.create_requirements ? 1 : 0 + name = "${var.prefix}-nat" + router = google_compute_router.router[0].name + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" +} + resource "google_service_account" "service_account" { count = var.create_requirements ? 1 : 0 account_id = "${var.prefix}-githubaction" diff --git a/README.md b/README.md index b6f3c01..de096bf 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,7 @@ __**Vulnerability policies are used as example here. Can be used with compliance 2. Set WAAS rules to protect your applications from real time attacks. >We have unrealistic example in wild.py but any imported module can be the bad actor. Requirements.txt scan of code security should have caught it realistically. 3. Observe the event and alerts to keep track on production application health. 
->Use tools like nuclei to simulate attacks --> nuclei -u http:/// -t nuclei-templates/f/ +>Use tools like nuclei to simulate attacks --> nuclei -u http:/// -t nuclei-templates/f/ (more info : https://medium.com/@cyb_detective/using-nuclei-for-osint-5-minute-basic-guide-f8764424902b ) ![Runtime policy - PC](img/runtime_policy.png "Runtime policy - PC") ![Runtime audit - PC](img/runtime_audit_pc.png "Runtime audit - PC") diff --git a/aks-deployment.yml b/aks-deployment.yml index 15d79f6..39373bd 100644 --- a/aks-deployment.yml +++ b/aks-deployment.yml @@ -16,6 +16,7 @@ spec: labels: aadpodidbinding: cnappdemo app: python-server-app + azure.workload.identity/use: "true" spec: containers: - name: python-server-app diff --git a/aks-serviceaccount.yaml b/aks-serviceaccount.yaml new file mode 100644 index 0000000..c068d43 --- /dev/null +++ b/aks-serviceaccount.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + azure.workload.identity/use: "true" + name: workload-identity-sa + namespace: default diff --git a/fake_commits/fake-secrets copy 2.yml b/fake_commits/fake-secrets copy 2.yml deleted file mode 100644 index f7c63e1..0000000 --- a/fake_commits/fake-secrets copy 2.yml +++ /dev/null @@ -1,2 +0,0 @@ -GITHUB_PAT=ghp_012345678901234567890123456789abcdef -AWS_ACCESS_KEY_ID=AKIA0123456789ABCDEF diff --git a/fake_commits/fake-secrets copy.yml b/fake_commits/fake-secrets copy.yml deleted file mode 100644 index f7c63e1..0000000 --- a/fake_commits/fake-secrets copy.yml +++ /dev/null @@ -1,2 +0,0 @@ -GITHUB_PAT=ghp_012345678901234567890123456789abcdef -AWS_ACCESS_KEY_ID=AKIA0123456789ABCDEF diff --git a/test b/test index e69de29..b28b04f 100644 --- a/test +++ b/test @@ -0,0 +1,3 @@ + + + diff --git a/wild.py b/wild.py index 5033f80..f0169a4 100644 --- a/wild.py +++ b/wild.py @@ -17,13 +17,16 @@ cloud_provider = platform.uname()[2] if 'amzn' in cloud_provider: s3 = boto3.resource('s3') - 
s3.meta.client.upload_file('/plz_del/FritzFrog/001eb377f0452060012124cb214f658754c7488ccb82e23ec56b2f45a636c859', 'cnappdemo' , unique_malware_filename) - s3.meta.client.upload_file('/plz_del/FritzFrog/10-MB-Test.docx', 'cnappdemo' , unique_sensitive_filename, ExtraArgs={'ACL':'public-read'}) - s3.meta.client.upload_file('/plz_del/FritzFrog/clia-lab-search-results-02.22.2023-19_43_13.csv', 'cnappdemo' , unique_hipaa_filename, ExtraArgs={'ACL':'public-read'}) + bucketname = os.environ.get("S3_BUCKET_NAME") if "S3_BUCKET_NAME" in os.environ else "cnappdemo" + s3.meta.client.upload_file('/plz_del/FritzFrog/001eb377f0452060012124cb214f658754c7488ccb82e23ec56b2f45a636c859', bucketname, unique_malware_filename) + s3.meta.client.upload_file('/plz_del/FritzFrog/10-MB-Test.docx', bucketname, unique_sensitive_filename, ExtraArgs={'ACL':'public-read'}) + s3.meta.client.upload_file('/plz_del/FritzFrog/clia-lab-search-results-02.22.2023-19_43_13.csv', bucketname, unique_hipaa_filename, ExtraArgs={'ACL':'public-read'}) elif 'azure' in cloud_provider: + blob_endpoint = os.environ["BLOB_ENDPOINT"] if "BLOB_ENDPOINT" in os.environ else "https://cnappdemo.blob.core.windows.net/" + container_name = os.environ["CONTAINER_NAME"] if "CONTAINER_NAME" in os.environ else "cnappdemo" default_credential = DefaultAzureCredential() - blob_service_client = BlobServiceClient("https://cnappdemo.blob.core.windows.net/",credential=default_credential) - container_client = blob_service_client.get_container_client("cnappdemo") + blob_service_client = BlobServiceClient(blob_endpoint,credential=default_credential) + container_client = blob_service_client.get_container_client(container_name) with open('/plz_del/FritzFrog/001eb377f0452060012124cb214f658754c7488ccb82e23ec56b2f45a636c859', "rb") as data: blob_client = container_client.upload_blob(name=unique_malware_filename, data=data) with open('/plz_del/FritzFrog/10-MB-Test.docx', "rb") as data: @@ -40,6 +43,7 @@ def do_GET(self): 
self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(bytes("Prisma Cloud CNAPP Darwin Demo with out policy", "utf-8")) + self.wfile.write(bytes("Prisma Cloud Demo without block policy", "utf-8")) self.wfile.write(bytes("

Host Requested: %s

" % self.headers.get('Host'), "utf-8")) self.wfile.write(bytes("

XFF Requested: %s

" % self.headers.get("X-Forwarded-For"), "utf-8")) self.wfile.write(bytes("

Command: %s

" % self.command, "utf-8"))