# Validates SQL migration files changed in a PR (runs with env: pr_no, GH_TOKEN):
#   1. every changed file under scripts/sql/ must end in .up.sql or .down.sql
#   2. each migration number must have exactly one up and one down file
# Posts a PR comment and fails the job on any violation.

# Fetch the latest changes from the main branch
git fetch origin main

# Get the list of changed files between main and the PR head
git diff origin/main...HEAD --name-only > diff

echo "Changed files:"
cat diff

echo "Changed SQL files-:"
# Filter SQL files from the list of changed files
awk '/scripts\/sql\//' diff

# Count the number of changed SQL files in the 'scripts/sql' directory
count=$(awk '/scripts\/sql\//' diff | wc -l)

# Nothing to validate when the PR touches no SQL files
if [[ $count -eq 0 ]]; then
    echo "No SQL files were added, Exiting from this action."
    exit 0
fi

sql_dir="scripts/sql"

# Read paths line-by-line instead of word-splitting a command substitution,
# so file names containing spaces cannot break the loop.
while IFS= read -r filename; do
    echo "Checking File: $filename"

    # BUG FIX: a migration DELETED by the PR still appears in the diff; previously it
    # fell through to a misleading "migration number is repeated" failure. Its up/down
    # pairing can no longer be checked on disk, so skip it explicitly.
    if [[ ! -e "$filename" ]]; then
        echo "File $filename no longer exists in this branch, skipping."
        continue
    fi

    # The SQL file name must end with either '.up.sql' or '.down.sql'
    if [[ "$filename" =~ \.(up|down)\.sql$ ]]; then
        echo "File name: $filename is in the correct format"
    else
        echo "Error: The SQL file name is not in the correct format: $filename."
        gh pr comment "$pr_no" --body "The SQL file name: $filename is not in the correct format."
        exit 1
    fi

    # Migration number is the numeric prefix of the file name; basename is robust
    # against path depth, unlike the previous 'cut -d / -f 3'.
    migration_no=$(basename "$filename" | cut -d "_" -f 1)
    echo "Migration Number: $migration_no"

    # Count how many files share this migration number (expect exactly 2: up + down).
    # '|| true' keeps a zero match count from aborting the pipeline.
    migration_files_present_of_this_no=$(ls "$sql_dir" | cut -d "_" -f 1 | grep -w -c "$migration_no" || true)

    if [[ $migration_files_present_of_this_no -eq 2 ]]; then
        echo "All looks good for this migration number."
    elif [[ $migration_files_present_of_this_no -eq 1 ]]; then
        # Only one file is present for this migration number
        echo "Only single migration file was present for migration no.: $migration_no. either up or down migration is missing! EXITING"
        gh pr comment "$pr_no" --body "Error: Only a single migration file was present for this number: $migration_no."
        exit 1
    else
        # More than two files share the number: the migration number is duplicated
        echo "Error: Migration number is repeated."
        gh pr comment "$pr_no" --body "Error: The SQL file number: $migration_no is duplicated"
        exit 1
    fi
done < <(awk '/scripts\/sql\//' diff)
RegistryDestinationImageMap: registryDestinationImageMap, RegistryCredentialMap: registryCredentialMap, PluginArtifactStage: pluginArtifactStage, + ImageScanMaxRetries: impl.config.ImageScanMaxRetries, + ImageScanRetryDelay: impl.config.ImageScanRetryDelay, } if dockerRegistry != nil { diff --git a/pkg/pipeline/types/CiCdConfig.go b/pkg/pipeline/types/CiCdConfig.go index b5446c85b2c..b5a6ae11e15 100644 --- a/pkg/pipeline/types/CiCdConfig.go +++ b/pkg/pipeline/types/CiCdConfig.go @@ -65,6 +65,8 @@ type CiCdConfig struct { SkipCreatingEcrRepo bool `env:"SKIP_CREATING_ECR_REPO" envDefault:"false"` MaxCiWorkflowRetries int `env:"MAX_CI_WORKFLOW_RETRIES" envDefault:"0"` NatsServerHost string `env:"NATS_SERVER_HOST" envDefault:"nats://devtron-nats.devtroncd:4222"` + ImageScanMaxRetries int `env:"IMAGE_SCAN_MAX_RETRIES" envDefault:"3"` + ImageScanRetryDelay int `env:"IMAGE_SCAN_RETRY_DELAY" envDefault:"5"` // from CdConfig CdLimitCpu string `env:"CD_LIMIT_CI_CPU" envDefault:"0.5"` CdLimitMem string `env:"CD_LIMIT_CI_MEM" envDefault:"3G"` diff --git a/pkg/pipeline/types/Workflow.go b/pkg/pipeline/types/Workflow.go index 327c484a26c..9a749bd9fb6 100644 --- a/pkg/pipeline/types/Workflow.go +++ b/pkg/pipeline/types/Workflow.go @@ -126,6 +126,8 @@ type WorkflowRequest struct { RegistryCredentialMap map[string]plugin.RegistryCredentials `json:"registryCredentialMap"` PluginArtifactStage string `json:"pluginArtifactStage"` PushImageBeforePostCI bool `json:"pushImageBeforePostCI"` + ImageScanMaxRetries int `json:"imageScanMaxRetries,omitempty"` + ImageScanRetryDelay int `json:"imageScanRetryDelay,omitempty"` Type bean.WorkflowPipelineType Pipeline *pipelineConfig.Pipeline Env *repository.Environment diff --git a/scripts/sql/211_image_scan_plugin_update.down.sql b/scripts/sql/211_image_scan_plugin_update.down.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/scripts/sql/211_image_scan_plugin_update.up.sql b/scripts/sql/211_image_scan_plugin_update.up.sql new 
-- Expose retry controls on the 'Vulnerability Scanning' plugin step and teach the
-- scan script to retry the scanner call with a configurable delay.
INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value", "default_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES
 (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Vulnerability Scanning' and ps."index"=1 and ps.deleted=false), 'IMAGE_SCAN_MAX_RETRIES','STRING','image scan max retry count',true,true,'3','INPUT','GLOBAL',1 ,'IMAGE_SCAN_MAX_RETRIES','f','now()', 1, 'now()', 1),
 (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Vulnerability Scanning' and ps."index"=1 and ps.deleted=false), 'IMAGE_SCAN_RETRY_DELAY','STRING','image scan retry delay (in seconds)',true,true,'5','INPUT','GLOBAL',1 ,'IMAGE_SCAN_RETRY_DELAY','f','now()', 1, 'now()', 1);

UPDATE plugin_pipeline_script SET script = '#!/bin/sh
echo "IMAGE SCAN"

perform_curl_request() {
    local attempt=1
    while [ "$attempt" -le "$IMAGE_SCAN_MAX_RETRIES" ]; do
        response=$(curl -s -w "\n%{http_code}" -X POST $IMAGE_SCANNER_ENDPOINT/scanner/image -H "Content-Type: application/json" -d "{\"image\": \"$DEST\", \"imageDigest\": \"$DIGEST\", \"pipelineId\" : $PIPELINE_ID, \"userId\": $TRIGGERED_BY, \"dockerRegistryId\": \"$DOCKER_REGISTRY_ID\" }")
        http_status=$(echo "$response" | tail -n1)
        if [ "$http_status" = "200" ]; then
            echo "Vulnerability Scanning request successful."
            return 0
        else
            echo "Attempt $attempt: Vulnerability Scanning request failed with HTTP status code $http_status"
            echo "Response Body: $response"
            attempt=$((attempt + 1))
            sleep "$IMAGE_SCAN_RETRY_DELAY"
        fi
    done
    # printf interprets \033 under any POSIX sh; echo -e is not portable (dash prints "-e" literally)
    printf "\033[1m======== Maximum retries reached. Vulnerability Scanning request failed ========\n"
    exit 1
}
perform_curl_request'
-- BUG FIX: the script row must be located through plugin_step.script_id.
-- plugin_pipeline_script.id and plugin_metadata.id come from unrelated sequences,
-- so the previous "WHERE id = (SELECT id FROM plugin_metadata ...)" matched the
-- wrong row (or none). The 212/214 migrations link script rows the same way.
WHERE id = (SELECT ps.script_id FROM plugin_metadata p INNER JOIN plugin_step ps ON ps.plugin_id = p.id WHERE p.name = 'Vulnerability Scanning' AND ps."index" = 1 AND ps.deleted = false);
-- Inline script for the 'DockerSlim v1.0.0' plugin step: slims the freshly built
-- image with dslim/slim and pushes the result (or the original image on failure).
INSERT INTO "plugin_pipeline_script" ("id", "script","type","deleted","created_on", "created_by", "updated_on", "updated_by")
VALUES (
    nextval('id_seq_plugin_pipeline_script'),
    $$#!/bin/sh
httpProbe=$(echo "$HTTPProbe" | tr "[:upper:]" "[:lower:]")
includeFilePath=$IncludePathFile

# Image coordinates from the CI event payload
export tag=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerImageTag)
export repo=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerRepository)
export registry=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerRegistryURL)

cd /devtroncd

docker pull dslim/slim

# Run the slim build; "=" instead of "==" keeps the test POSIX-sh compatible
if [ "$httpProbe" = "true" ]; then
    if [ -n "$includeFilePath" ]; then
        docker run -i --rm -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD dslim/slim build --http-probe=true --target $repo:$tag --tag $repo:$tag --continue-after=2 --include-path-file $includeFilePath
    else
        docker run -i --rm -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD dslim/slim build --http-probe=true --target $repo:$tag --tag $repo:$tag --continue-after=2
    fi
elif [ -n "$includeFilePath" ]; then
    docker run -i --rm -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD dslim/slim build --http-probe=false --target $repo:$tag --tag $repo:$tag --continue-after=2 --include-path-file $includeFilePath
else
    docker run -i --rm -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD dslim/slim build --http-probe=false --target $repo:$tag --tag $repo:$tag --continue-after=2
fi
# BUG FIX: capture the build status here. Previously $? was read after docker push,
# so the success/failure message reported the push result, not the slim-build result.
build_status=$?

docker push $registry/$repo:$tag

if [ $build_status -eq 0 ]; then
    echo "-----------***** Success: Docker-slim images built successfully *****-----------"
else
    echo "-----------***** Error: Docker-slim build failed, we are pushing original image to the container registry *****-----------"
fi$$,
    'SHELL',
    'f',
    'now()',
    1,
    'now()',
    1
);
--include-path-file','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); diff --git a/scripts/sql/213_eks_cluster_creation.down.sql b/scripts/sql/213_eks_cluster_creation.down.sql new file mode 100644 index 00000000000..dc0da59670d --- /dev/null +++ b/scripts/sql/213_eks_cluster_creation.down.sql @@ -0,0 +1,6 @@ +DELETE FROM plugin_stage_mapping where plugin_id=(SELECT id from plugin_metadata where name='EKS Create Cluster v1.0.0'); +DELETE FROM plugin_step where plugin_id=(SELECT id FROM plugin_metadata WHERE name='EKS Create Cluster v1.0.0'); +DELETE FROM plugin_step_variable where plugin_step_id=(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false); +DELETE FROM pipeline_stage_step WHERE name ='EKS Create Cluster v1.0.0'; +DELETE FROM plugin_tag_relation WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='EKS Create Cluster v1.0.0'); +DELETE FROM plugin_metadata where name='EKS Create Cluster v1.0.0'; \ No newline at end of file diff --git a/scripts/sql/213_eks_cluster_creation.up.sql b/scripts/sql/213_eks_cluster_creation.up.sql new file mode 100644 index 00000000000..af885a1122a --- /dev/null +++ b/scripts/sql/213_eks_cluster_creation.up.sql @@ -0,0 +1,178 @@ +INSERT INTO plugin_metadata (id,name,description,type,icon,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_metadata'),'EKS Create Cluster v1.0.0','Plugin to provision a EKS cluster in AWS','PRESET','https://raw.githubusercontent.com/devtron-labs/devtron/main/assets/eks-plugin-icon.svg',false,'now()',1,'now()',1); + +INSERT INTO plugin_tag (id, name, deleted, created_on, created_by, updated_on, updated_by) +SELECT + nextval('id_seq_plugin_tag'), + 'AWS EKS', + false, + 'now()', + 1, + 'now()', + 1 +WHERE NOT EXISTS ( + SELECT 1 + FROM plugin_tag + WHERE name = 'AWS EKS' +); + +INSERT INTO "plugin_tag_relation" ("id", "tag_id", 
"plugin_id", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_tag_relation'), (SELECT id FROM plugin_tag WHERE name='AWS EKS'), (SELECT id FROM plugin_metadata WHERE name='EKS Create Cluster v1.0.0'),'now()', 1, 'now()', 1); + +INSERT INTO plugin_stage_mapping (id,plugin_id,stage_type,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_stage_mapping'),(SELECT id from plugin_metadata where name='EKS Create Cluster v1.0.0'), 0,'now()',1,'now()',1); + +INSERT INTO "plugin_pipeline_script" ("id", "script","type","deleted","created_on", "created_by", "updated_on", "updated_by") +VALUES ( + nextval('id_seq_plugin_pipeline_script'), + $$#!/bin/sh +set -e + +ENABLE_PLUGIN=$(echo "$EnablePlugin" | tr "[:upper:]" "[:lower:]") +AUTOMATED_NAME=$(echo "$AutomatedName" | tr "[:upper:]" "[:lower:]") +CLUSTER_NAME="${ClusterName}" +VERSION="${Version}" +REGION="${Region}" +ZONES="${Zones}" +NODEGROUP_NAME="${NodeGroupName:-linux-nodes}" +NODE_TYPE="${NodeType:-m5.large}" +DESIRED_NODES="${DesiredNodes:-1}" +MIN_NODES="${MinNodes:-0}" +MAX_NODES="${MaxNodes:-3}" +USE_IAM_NODE_ROLE=$(echo "$UseIAMNodeRole" | tr "[:upper:]" "[:lower:]") +USE_CONFIG_FILE=$(echo "$UseEKSConfigFile" | tr "[:upper:]" "[:lower:]") +CONFIG_FILE_PATH="${EKSConfigFilePath}" +AWS_ACCESS_KEY_ID=$AWSAccessKeyId +AWS_SECRET_ACCESS_KEY=$AWSSecretAccessKey + +if [ "$AUTOMATED_NAME" == "true" ]; then + if [ -z "$CLUSTER_NAME" ]; then + echo "Error: CLUSTER_NAME is empty. Exiting the script." + exit 1 + fi + + # Generate a random suffix for the cluster name + RANDOM_SUFFIX=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 4) + + # Define the regex pattern + PATTERN='^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$' + + # Check if the random suffix matches the pattern, if not, regenerate + while [[ ! 
"$RANDOM_SUFFIX" =~ $PATTERN ]]; do + RANDOM_SUFFIX=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 4) + done + + # Check if the cluster name matches the regex, if not, use a default name + if [[ ! "$CLUSTER_NAME" =~ $PATTERN ]]; then + echo "Error: CLUSTER_NAME does not match the required regex. Using a default name." + CLUSTER_NAME="default-devtron-cluster" + fi + + CLUSTER_NAME="${CLUSTER_NAME}-${RANDOM_SUFFIX}" + echo "The random generated cluster name is ${CLUSTER_NAME}" +fi + +curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp +mv /tmp/eksctl /usr/local/bin + +if [ "$ENABLE_PLUGIN" == "true" ]; then + # Check if IAM node role is used + if [ "$USE_IAM_NODE_ROLE" == "true" ]; then + echo "Using IAM node role for AWS credentials" + AWS_CLI_CONFIG="/home/tekton/.aws" + mkdir -p "$AWS_CLI_CONFIG" + else + # Check if AWS credentials are provided + if [ -z "$AWSAccessKeyId" ] || [ -z "$AWSSecretAccessKey" ]; then + echo "Error: AWS credentials not provided. Set USE_IAM_NODE_ROLE=true or provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY." + exit 1 + fi + + echo "exporting aws credentials" + export AWS_ACCESS_KEY_ID="$AWSAccessKeyId" + export AWS_SECRET_ACCESS_KEY="$AWSSecretAccessKey" + fi + + # Check if using EKS config file + if [ "$USE_CONFIG_FILE" == "true" ]; then + if [ -z "$CONFIG_FILE_PATH" ]; then + echo "Error: EKS config file path not provided. Set CONFIG_FILE_PATH when USE_CONFIG_FILE=true." + exit 1 + fi + + # Create EKS cluster using config file + echo "************ Using Eksctl config file to create the cluster ***************" + eksctl create cluster --config-file "/devtroncd/$CONFIG_FILE_PATH" --kubeconfig /devtroncd/kubeconfig.yaml + + else + if [[ -z "$CLUSTER_NAME" ]]; then + echo "Error: ClusterName should not be empty. Exiting the script." + exit 1 + fi + if [[ -z "$VERSION" ]]; then + echo "Error: Version should not be empty. Exiting the script." 
+ exit 1 + fi + if [[ -z "$REGION" ]]; then + echo "Error: Region should not be empty. Exiting the script." + exit 1 + fi + echo "************** Creating Eksctl cluster using the parameters provided in plugin **************" + # Create EKS cluster using specified parameters + eksctl create cluster \\ + --name "$CLUSTER_NAME" \\ + --version "$VERSION" \\ + --region "$REGION" \\ + --zones "$ZONES" \\ + --nodegroup-name "$NODEGROUP_NAME" \\ + --node-type "$NODE_TYPE" \\ + --nodes "$DESIRED_NODES" \\ + --nodes-min "$MIN_NODES" \\ + --nodes-max "$MAX_NODES" \\ + --kubeconfig /devtroncd/kubeconfig.yaml + fi + + # Check if the cluster creation was successful + if [ $? -eq 0 ]; then + echo "***** Successfully created EKS cluster: $CLUSTER_NAME *****" + export CreatedClusterName=$CLUSTER_NAME + # Write kubeconfig to the specified workspace + export EKSKubeConfigPath=/devtroncd/kubeconfig.yaml + else + echo "Error: Failed to create EKS cluster: $CLUSTER_NAME" + exit 1 + fi +else + echo "Error: Please enable the plugin to create plugin" + exit 1 +fi$$, + 'SHELL', + 'f', + 'now()', + 1, + 'now()', + 1 +); + +INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='EKS Create Cluster v1.0.0'),'Step 1','Step 1 - EKS Create Cluster','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' 
and ps."index"=1 and ps.deleted=false),'EnablePlugin','BOOL','True or False to enable plugin','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'AutomatedName','BOOL','True or False to enabling Random name of the cluster creation based on the ClusterName provided','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'UseIAMNodeRole','BOOL','True or False to use IAM Node Role for EKS Cluster creation ','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'AWSAccessKeyId','STRING','AWS Access Key ID','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'AWSSecretAccessKey','STRING','AWS Secret Access KEY','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'ClusterName','STRING','Provide the Cluster Name','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM 
plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'Version','STRING','Version of the EKS Cluster to create','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'Region','STRING','AWS Region for EKS Cluster','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'Zones','STRING','Availability Zone for EKS Cluster','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'NodeGroupName','STRING','NodeGroup Name for EKS Cluster','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'NodeType','STRING','EC2 instance type for NodeGroup','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'DesiredNodes','STRING','No. 
of Desired nodes in NodeGroup','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'MinNodes','STRING','No. of Minimum nodes in NodeGroup','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'MaxNodes','STRING','No. of Maximum nodes in NodeGroup','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'UseEKSConfigFile','BOOL','True or False to use ConfigFile for EKS Cluster creation','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'EKSConfigFilePath','STRING','Path for EKS config file','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and ps."index"=1 and ps.deleted=false),'CreatedClusterName','STRING','The EKS cluster created name','t','f',false,null,'OUTPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='EKS Create Cluster v1.0.0' and 
ps."index"=1 and ps.deleted=false),'EKSKubeConfigPath','STRING','The Kubeconfig path of EKS','t','f',false,null,'OUTPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); \ No newline at end of file diff --git a/scripts/sql/214_copacetic_plugin_v1_0_0.down.sql b/scripts/sql/214_copacetic_plugin_v1_0_0.down.sql new file mode 100644 index 00000000000..5f9933bc864 --- /dev/null +++ b/scripts/sql/214_copacetic_plugin_v1_0_0.down.sql @@ -0,0 +1,8 @@ +DELETE FROM plugin_step_variable WHERE plugin_step_id=(SELECT ps.id FROM plugin_metadata p INNER JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copacetic v1.0.0' and ps."index"=1 and ps.deleted=false); +DELETE FROM plugin_step WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Copacetic v1.0.0'); +DELETE FROM plugin_stage_mapping WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Copacetic v1.0.0'); +DELETE FROM pipeline_stage_step_variable WHERE pipeline_stage_step_id in (SELECT pipeline_stage_id FROM pipeline_stage_step WHERE name='Copacetic v1.0.0'); +DELETE FROM pipeline_stage_step_variable WHERE pipeline_stage_step_id in (SELECT id FROM pipeline_stage_step WHERE name='Copacetic v1.0.0'); +DELETE FROM pipeline_stage_step WHERE name ='Copacetic v1.0.0'; +DELETE FROM plugin_tag_relation WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Copacetic v1.0.0'); +DELETE FROM plugin_metadata WHERE name='Copacetic v1.0.0'; \ No newline at end of file diff --git a/scripts/sql/214_copacetic_plugin_v1_0_0.up.sql b/scripts/sql/214_copacetic_plugin_v1_0_0.up.sql new file mode 100644 index 00000000000..db969a2a1d5 --- /dev/null +++ b/scripts/sql/214_copacetic_plugin_v1_0_0.up.sql @@ -0,0 +1,81 @@ +INSERT INTO plugin_metadata (id,name,description,type,icon,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_metadata'),'Copacetic v1.0.0','This plugin is used to patch the container image vulnerabilities (Patching for Multi Architecture Builds not supported 
-- Inline script for the 'Copacetic v1.0.0' plugin step: scans the built image with
-- trivy, patches fixable OS vulnerabilities with copa, and pushes the patched image.
INSERT INTO "plugin_pipeline_script" ("id", "script","type","deleted","created_on", "created_by", "updated_on", "updated_by")
VALUES (
    nextval('id_seq_plugin_pipeline_script'),
    $$#!/bin/sh

# Image coordinates from the CI event payload
export appName=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.appName)
export registry=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerRegistryURL)
export repo=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerRepository)
export tag=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerImageTag)
export platform=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.ciBuildConfig.dockerBuildConfig.targetPlatform)

# NOTE(review): this only catches the exact "linux/arm64,linux/amd64" spelling; other
# multi-platform lists or orderings would slip through -- confirm the payload always
# emits this form for multi-arch builds.
if [[ $platform == "linux/arm64,linux/amd64" ]] ; then
    echo "platform = $platform"
    echo "------------------------------------------------------------------------------------------------------------------------"
    echo "######### Patching Multi Architecture Image not supported #########"
    echo "------------------------------------------------------------------------------------------------------------------------"
    echo "error: can't use copacetic plugin for multi-architecture builds"
    exit 1;
else
    # Install trivy (scanner)
    curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.46.1

    # Map uname -m output to the release-asset architecture name
    uname_arch() {
        arch=$(uname -m)
        case $arch in
        x86_64) arch="amd64" ;;
        aarch64) arch="arm64" ;;
        esac
        echo ${arch}
    }
    os=$(uname | tr "[:upper:]" "[:lower:]")
    # Capture the function output explicitly instead of relying on the global it sets
    arch=$(uname_arch)
    wget https://github.com/project-copacetic/copacetic/releases/download/v0.5.1/copa_0.5.1_${os}_${arch}.tar.gz
    tar -xvzf copa_0.5.1_${os}_${arch}.tar.gz
    mv copa /usr/local/bin/

    # Summarize fixable OS vulnerabilities, then produce the JSON report copa consumes
    trivy image --vuln-type os --ignore-unfixed $registry/$repo:$tag | grep -i total
    trivy image --vuln-type os --ignore-unfixed -f json -o $appName.json $registry/$repo:$tag

    # copa patches through a buildkit sidecar container
    export BUILDKIT_VERSION=v0.12.0
    docker run \
        --detach \
        --rm \
        --privileged \
        --name buildkitd \
        --entrypoint buildkitd \
        "moby/buildkit:$BUILDKIT_VERSION"

    copa patch -i $registry/$repo:$tag -r $appName.json -t $tag --addr docker-container://buildkitd --timeout "$CopaTimeout"
    if [ $? -eq 0 ] ; then
        # Re-scan the patched image and push it
        trivy image --vuln-type os --ignore-unfixed $registry/$repo:$tag | grep -i total
        docker push $registry/$repo:$tag
    else
        echo "------------------------------------------------------------------------------------------------------------------------"
        echo "BUILD FAILED: non zero exit status during copa patch..."
        echo "------------------------------------------------------------------------------------------------------------------------"
        exit 1;
    fi
fi
$$,
    'SHELL',
    'f',
    'now()',
    1,
    'now()',
    1
);