From e378454ba4ab62d8d203802a596e992df62686f0 Mon Sep 17 00:00:00 2001 From: Indranil Banerjee Date: Mon, 2 Dec 2024 20:34:35 -0800 Subject: [PATCH 01/11] Added Cloudformation template --- .../MSKAndKafkaClientEC2.yaml | 952 ++++++++++++++++++ .../{template.yaml => template_original.yaml} | 3 + 2 files changed, 955 insertions(+) create mode 100644 msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml rename msk-lambda-iam-python-sam/{template.yaml => template_original.yaml} (97%) diff --git a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml new file mode 100644 index 000000000..863def0bd --- /dev/null +++ b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml @@ -0,0 +1,952 @@ +AWSTemplateFormatVersion: '2010-09-09' +Parameters: + EnvType: + Description: MSK Cluster Type. + Default: Provisioned + Type: String + AllowedValues: + - Serverless + - Provisioned + ConstraintDescription: Must specify Serverless or Provisioned. + LatestAmiId: + Type: 'AWS::SSM::Parameter::Value' + Default: 'al2023-ami-kernel-default-x86_64' + MSKKafkaVersion: + Type: String + Default: 3.5.1 + ApacheKafkaInstallerLocation: + Type: String + Default: https://archive.apache.org/dist/kafka/3.5.1/kafka_2.13-3.5.1.tgz + KafkaTopicForLambda: + Type: String + Default: MskIamJavaLambdaTopic + ServerlessLandGithubLocation: + Type: String + Default: https://github.com/aws-samples/serverless-patterns/ +Conditions: + CreateProvisionedCluster: !Equals + - !Ref EnvType + - Provisioned + CreateServerlessCluster: !Equals + - !Ref EnvType + - Serverless +Mappings: + SubnetConfig: + VPC: + CIDR: '10.0.0.0/16' + PublicOne: + CIDR: '10.0.0.0/24' + PrivateSubnetMSKOne: + CIDR: '10.0.1.0/24' + PrivateSubnetMSKTwo: + CIDR: '10.0.2.0/24' + PrivateSubnetMSKThree: + CIDR: '10.0.3.0/24' +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + EnableDnsSupport: true + EnableDnsHostnames: true + CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] + Tags: + - Key: 
'Name' + Value: 'MSKVPC' + + PublicSubnetOne: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR'] + MapPublicIpOnLaunch: true + Tags: + - Key: 'Name' + Value: 'PublicSubnet' + PrivateSubnetMSKOne: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKOne', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKOne' + PrivateSubnetMSKTwo: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 1 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKTwo', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKTwo' + PrivateSubnetMSKThree: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 2 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKThree', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKThree' + + InternetGateway: + Type: AWS::EC2::InternetGateway + GatewayAttachement: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref 'VPC' + InternetGatewayId: !Ref 'InternetGateway' + + NATEIP: + Type: AWS::EC2::EIP + DependsOn: GatewayAttachement + Properties: + Domain: vpc + + NATGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt NATEIP.AllocationId + SubnetId: !Ref 'PublicSubnetOne' + Tags: + - Key: 'Name' + Value: 'ConfluentKafkaNATGateway' + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref 'VPC' + PublicRoute: + Type: AWS::EC2::Route + DependsOn: GatewayAttachement + Properties: + RouteTableId: !Ref 'PublicRouteTable' + DestinationCidrBlock: 
'0.0.0.0/0' + GatewayId: !Ref 'InternetGateway' + PublicSubnetOneRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnetOne + RouteTableId: !Ref PublicRouteTable + + PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref 'VPC' + + PrivateRoute: + Type: AWS::EC2::Route + DependsOn: NATGateway + Properties: + RouteTableId: !Ref 'PrivateRouteTable' + DestinationCidrBlock: '0.0.0.0/0' + NatGatewayId: !Ref 'NATGateway' + + PrivateSubnetMSKOneRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKOne + PrivateSubnetMSKTwoRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKTwo + PrivateSubnetMSKThreeRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKThree + + KafkaClientInstanceSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Enable SSH access via port 22 from BastionHostSecurityGroup + GroupName: !Sub "${AWS::StackName} Security group attached to the kakfa client producer" + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3500 + ToPort: 3500 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3600 + ToPort: 3600 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3800 + ToPort: 3800 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3900 + ToPort: 3900 + CidrIp: 10.0.0.0/24 + + MSKSecurityGroup: + Type: AWS::EC2::SecurityGroup + DependsOn: [VPC,KafkaClientInstanceSecurityGroup] + Properties: + GroupDescription: MSK Security Group + GroupName: !Sub "${AWS::StackName} Security group for the MSK cluster" + VpcId: !Ref 'VPC' + SecurityGroupIngress: + - IpProtocol: 
tcp + FromPort: 2181 + ToPort: 2181 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9094 + ToPort: 9094 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9096 + ToPort: 9096 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9092 + ToPort: 9092 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9098 + ToPort: 9098 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 8083 + ToPort: 8083 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 8081 + ToPort: 8081 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + + MSKSelfIngressAllowRule: + Type: AWS::EC2::SecurityGroupIngress + DependsOn: MSKSecurityGroup + Properties: + GroupId: !GetAtt MSKSecurityGroup.GroupId + Description: Enable Self referencing Bootstrap servers + IpProtocol: tcp + FromPort: 9092 + ToPort: 9098 + SourceSecurityGroupId: !GetAtt MSKSecurityGroup.GroupId + + KafkaClientSelfIngressAllowRule: + Type: AWS::EC2::SecurityGroupIngress + DependsOn: KafkaClientInstanceSecurityGroup + Properties: + GroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + IpProtocol: tcp + FromPort: 22 + ToPort: 22 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + + KafkaClientEC2InstanceProvisioned: + Condition: CreateProvisionedCluster + DependsOn: MSKCluster + Type: AWS::EC2::Instance + Properties: + InstanceType: m5.large + IamInstanceProfile: !Ref EC2InstanceProfile + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + SubnetId: !Ref PublicSubnetOne + SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] + ImageId: !Ref LatestAmiId + Tags: + - Key: 'Name' + Value: 'KafkaClientInstance' + 
BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 50 + VolumeType: gp2 + DeleteOnTermination: true + UserData: + Fn::Base64: + !Sub + - | + #!/bin/bash + yum update -y + yum install java-openjdk11-devel -y + yum install nmap-ncat -y + yum install git -y + yum erase awscli -y + yum install jq -y + amazon-linux-extras install docker -y + sudo dnf install python3 + sudo pip install boto3 + service docker start + usermod -a -G docker ec2-user + + # install AWS CLI 2 - access with aws2 + cd /home/ec2-user + mkdir -p awscli + cd awscli + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + # Create dirs, get Apache Kafka and unpack it + cd /home/ec2-user + KAFKA_VERSION=${msk_kafka_version} + KAFKA_FOLDER_VERSION=$(echo "$KAFKA_VERSION" | tr -d '.') + KAFKA_FOLDER='Kafka'$KAFKA_FOLDER_VERSION + mkdir -p $KAFKA_FOLDER + mkdir -p /tmp/kafka + ln -s /home/ec2-user/$KAFKA_FOLDER /home/ec2-user/kafka + cd $KAFKA_FOLDER + APACHE_KAFKA_INSTALLER_LOCATION=${apache_kafka_installer_location} + wget $APACHE_KAFKA_INSTALLER_LOCATION + APACHE_KAFKA_INSTALLER_FILE=$(echo "$APACHE_KAFKA_INSTALLER_LOCATION" | awk -F "/" '{print $NF}') + tar -xzf $APACHE_KAFKA_INSTALLER_FILE --strip 1 + cd libs + wget https://github.com/aws/aws-msk-iam-auth/releases/download/v2.2.0/aws-msk-iam-auth-2.2.0-all.jar + cd ../bin + echo "security.protocol=SASL_SSL" > client.properties + echo "sasl.mechanism=AWS_MSK_IAM" >> client.properties + echo "sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;" >> client.properties + echo "sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler" >> client.properties + + # Install AWS SAM CLI + cd /home/ec2-user + mkdir -p awssam + cd awssam + wget https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip aws-sam-cli-linux-x86_64.zip -d sam-installation + sudo ./sam-installation/install + + 
# Create command files for creating Kafka Topic and Kafka Producer + cd /home/ec2-user + MSK_CLUSTER_ARN=${msk_cluster_arn} + KAFKA_TOPIC=${kafka_topic_for_lambda} + echo "#!/bin/bash" > kafka_topic_creator.sh + sudo chmod +x kafka_topic_creator.sh + echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh + AWS_REGION=$(curl -fsq http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/[a-z]$//') + echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh + echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh + echo "sleep 5" >> kafka_topic_creator.sh + echo "KAFKA_TOPIC=$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "/home/ec2-user/kafka/bin/kafka-topics.sh --create --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --command-config /home/ec2-user/kafka/bin/client.properties --replication-factor 3 --partitions 3 --topic \$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "echo \"export MSK_CLUSTER_ARN=\$MSK_CLUSTER_ARN\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export AWS_REGION=\$AWS_REGION\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export BOOTSTRAP_BROKERS_IAM=\$BOOTSTRAP_BROKERS_IAM\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export KAFKA_TOPIC=\$KAFKA_TOPIC\" >> .bash_profile" >> kafka_topic_creator.sh + echo "#!/bin/bash" > kafka_message_sender.sh + echo "source /home/ec2-user/.bash_profile" >> kafka_message_sender.sh + echo "/home/ec2-user/kafka/bin/kafka-console-producer.sh --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --producer.config /home/ec2-user/kafka/bin/client.properties --topic $KAFKA_TOPIC" >> kafka_message_sender.sh + sudo chmod +x kafka_message_sender.sh + CLUSTER_NAME="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f2)" + CLUSTER_ID="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f3)" + echo "export CLUSTER_NAME=$CLUSTER_NAME" >> 
/home/ec2-user/.bash_profile + echo "export CLUSTER_ID=$CLUSTER_ID" >> /home/ec2-user/.bash_profile + ./kafka_topic_creator.sh > kafka_topic_creator_output.txt + + #Checkout Serverless Patterns from Github + cd /home/ec2-user + SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location} + git clone $SERVERLESS_LAND_GITHUB_LOCATION + cd ./serverless-patterns/msk-lambda-iam-python-sam + cp template_original.yaml template.yaml + sudo chown -R ec2-user . + source /home/ec2-user/.bash_profile + sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml + sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml + sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + + # Get IP CIDR range for EC2 Instance Connect + cd /home/ec2-user + mkdir -p ip_prefix + cd ip_prefix + git clone https://github.com/joetek/aws-ip-ranges-json.git + cd aws-ip-ranges-json + AWS_REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') + EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') + echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile + SECURITY_GROUP=${security_group_id} + echo "export SECURITY_GROUP=$SECURITY_GROUP" >> /home/ec2-user/.bash_profile + aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP + + - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId + msk_cluster_arn : !GetAtt MSKCluster.Arn + kafka_topic_for_lambda : !Ref KafkaTopicForLambda + msk_kafka_version: !Ref MSKKafkaVersion + apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation + serverless_land_github_location: !Ref ServerlessLandGithubLocation + + KafkaClientEC2InstanceServerless: + Condition: CreateServerlessCluster + DependsOn: ServerlessMSKCluster + Type: AWS::EC2::Instance + Properties: + InstanceType: m5.large + 
IamInstanceProfile: !Ref EC2InstanceProfile + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + SubnetId: !Ref PublicSubnetOne + SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] + ImageId: !Ref LatestAmiId + Tags: + - Key: 'Name' + Value: 'KafkaClientInstance' + BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 50 + VolumeType: gp2 + DeleteOnTermination: true + UserData: + Fn::Base64: + !Sub + - | + #!/bin/bash + yum update -y + yum install java-openjdk11-devel -y + yum install nmap-ncat -y + yum install git -y + yum erase awscli -y + yum install jq -y + amazon-linux-extras install docker -y + sudo dnf install python3 + sudo pip install boto3 + service docker start + usermod -a -G docker ec2-user + + # install AWS CLI 2 - access with aws2 + cd /home/ec2-user + mkdir -p awscli + cd awscli + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + # Create dirs, get Apache Kafka and unpack it + cd /home/ec2-user + KAFKA_VERSION=${msk_kafka_version} + KAFKA_FOLDER_VERSION=$(echo "$KAFKA_VERSION" | tr -d '.') + KAFKA_FOLDER='Kafka'$KAFKA_FOLDER_VERSION + mkdir -p $KAFKA_FOLDER + mkdir -p /tmp/kafka + ln -s /home/ec2-user/$KAFKA_FOLDER /home/ec2-user/kafka + cd $KAFKA_FOLDER + APACHE_KAFKA_INSTALLER_LOCATION=${apache_kafka_installer_location} + wget $APACHE_KAFKA_INSTALLER_LOCATION + APACHE_KAFKA_INSTALLER_FILE=$(echo "$APACHE_KAFKA_INSTALLER_LOCATION" | awk -F "/" '{print $NF}') + tar -xzf $APACHE_KAFKA_INSTALLER_FILE --strip 1 + cd libs + wget https://github.com/aws/aws-msk-iam-auth/releases/download/v2.2.0/aws-msk-iam-auth-2.2.0-all.jar + cd ../bin + echo "security.protocol=SASL_SSL" > client.properties + echo "sasl.mechanism=AWS_MSK_IAM" >> client.properties + echo "sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;" >> client.properties + echo 
"sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler" >> client.properties + + # Install AWS SAM CLI + cd /home/ec2-user + mkdir -p awssam + cd awssam + wget https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip aws-sam-cli-linux-x86_64.zip -d sam-installation + sudo ./sam-installation/install + + # Create command files for creating Kafka Topic and Kafka Producer + cd /home/ec2-user + MSK_CLUSTER_ARN=${msk_cluster_arn} + KAFKA_TOPIC=${kafka_topic_for_lambda} + echo "#!/bin/bash" > kafka_topic_creator.sh + sudo chmod +x kafka_topic_creator.sh + echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh + AWS_REGION=$(curl -fsq http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/[a-z]$//') + echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh + echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh + echo "sleep 5" >> kafka_topic_creator.sh + echo "KAFKA_TOPIC=$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "/home/ec2-user/kafka/bin/kafka-topics.sh --create --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --command-config /home/ec2-user/kafka/bin/client.properties --replication-factor 3 --partitions 3 --topic \$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "echo \"export MSK_CLUSTER_ARN=\$MSK_CLUSTER_ARN\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export AWS_REGION=\$AWS_REGION\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export BOOTSTRAP_BROKERS_IAM=\$BOOTSTRAP_BROKERS_IAM\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export KAFKA_TOPIC=\$KAFKA_TOPIC\" >> .bash_profile" >> kafka_topic_creator.sh + echo "#!/bin/bash" > kafka_message_sender.sh + echo "source /home/ec2-user/.bash_profile" >> kafka_message_sender.sh + echo "/home/ec2-user/kafka/bin/kafka-console-producer.sh 
--bootstrap-server \$BOOTSTRAP_BROKERS_IAM --producer.config /home/ec2-user/kafka/bin/client.properties --topic $KAFKA_TOPIC" >> kafka_message_sender.sh + sudo chmod +x kafka_message_sender.sh + CLUSTER_NAME="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f2)" + CLUSTER_ID="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f3)" + echo "export CLUSTER_NAME=$CLUSTER_NAME" >> /home/ec2-user/.bash_profile + echo "export CLUSTER_ID=$CLUSTER_ID" >> /home/ec2-user/.bash_profile + ./kafka_topic_creator.sh > kafka_topic_creator_output.txt + + #Checkout Serverless Patterns from Github + cd /home/ec2-user + SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location} + git clone $SERVERLESS_LAND_GITHUB_LOCATION + cd ./serverless-patterns/msk-lambda-iam-python-sam + cp template_original.yaml template.yaml + sudo chown -R ec2-user . + source /home/ec2-user/.bash_profile + sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml + sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml + sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + + # Get IP CIDR range for EC2 Instance Connect + cd /home/ec2-user + mkdir -p ip_prefix + cd ip_prefix + git clone https://github.com/joetek/aws-ip-ranges-json.git + cd aws-ip-ranges-json + AWS_REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') + EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') + echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile + SECURITY_GROUP=${security_group_id} + echo "export SECURITY_GROUP=$SECURITY_GROUP" >> /home/ec2-user/.bash_profile + aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP + + - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId + msk_cluster_arn : !GetAtt MSKCluster.Arn + kafka_topic_for_lambda : !Ref KafkaTopicForLambda + 
msk_kafka_version: !Ref MSKKafkaVersion + apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation + serverless_land_github_location: !Ref ServerlessLandGithubLocation + + EC2InstanceEndpoint: + Type: AWS::EC2::InstanceConnectEndpoint + Properties: + PreserveClientIp: true + SecurityGroupIds: + - !GetAtt KafkaClientInstanceSecurityGroup.GroupId + SubnetId: !Ref PublicSubnetOne + + EC2Role: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Sid: '' + Effect: Allow + Principal: + Service: ec2.amazonaws.com + Action: 'sts:AssumeRole' + Path: "/" + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonMSKFullAccess + - arn:aws:iam::aws:policy/AWSCloudFormationFullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAFullAccess + - arn:aws:iam::aws:policy/IAMFullAccess + - arn:aws:iam::aws:policy/AWSLambda_FullAccess + Policies: + - PolicyName: MSKConfigurationAccess + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "kafka:CreateConfiguration", + "Resource": "*" + } + ] + }' + - PolicyName: CloudformationDeploy + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:*" + ], + "Resource": "*" + } + ] + }' + - PolicyName: MSKProducerPermissions + PolicyDocument: + Version: 2012-10-17 + Statement: + - Sid: SecretsAccess + Effect: Allow + Action: + - 'secretsmanager:*' + - 'kms:*' + - 'glue:*Schema*' + - 'iam:CreatePolicy' + - 'iam:Tag*' + - 'iam:AttachRolePolicy' + Resource: '*' + - PolicyName: MSKConnectAuthentication + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic*", + "kafka-cluster:Connect", + "kafka-cluster:AlterCluster", 
+ "kafka-cluster:DescribeCluster", + "kafka-cluster:DescribeClusterDynamicConfiguration" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:cluster/${AWS::StackName}-cluster/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic*", + "kafka-cluster:WriteData", + "kafka-cluster:ReadData" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:topic/${AWS::StackName}-cluster/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:AlterGroup", + "kafka-cluster:DescribeGroup" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:group/${AWS::StackName}-cluster/*" + ] + } + ] + }' + - PolicyName: SecurityGroupsPolicy + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroupRules", + "ec2:DescribeTags" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:RevokeSecurityGroupEgress", + "ec2:ModifySecurityGroupRules", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress" + ], + "Resource": [ + "arn:aws:ec2:${AWS::Region}:${AWS::AccountId}:security-group/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:ModifySecurityGroupRules" + ], + "Resource": [ + "arn:aws:ec2:${AWS::Region}:${AWS::AccountId}:security-group-rule/*" + ] + } + ] + }' + + EC2InstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + InstanceProfileName: !Join + - '-' + - - 'EC2MMMSKCFProfile' + - !Ref 'AWS::StackName' + Roles: + - !Ref EC2Role + + + MSKCertAuthority: + Type: AWS::ACMPCA::CertificateAuthority + Condition: CreateProvisionedCluster + Properties: + KeyAlgorithm: "RSA_4096" + SigningAlgorithm: "SHA256WITHRSA" + Subject: + Country: "US" + Type: "ROOT" + + MSKCert: + Type: AWS::ACMPCA::Certificate + Condition: 
CreateProvisionedCluster + Properties: + CertificateAuthorityArn: !Ref MSKCertAuthority + CertificateSigningRequest: !GetAtt + - MSKCertAuthority + - CertificateSigningRequest + SigningAlgorithm: "SHA256WITHRSA" + TemplateArn: arn:aws:acm-pca:::template/RootCACertificate/V1 + Validity: + Type: YEARS + Value: 10 + + RootCAActivation: + Type: AWS::ACMPCA::CertificateAuthorityActivation + Condition: CreateProvisionedCluster + Properties: + CertificateAuthorityArn: + Ref: MSKCertAuthority + Certificate: + Fn::GetAtt: + - MSKCert + - Certificate + Status: ACTIVE + + RootCAPermission: + Type: AWS::ACMPCA::Permission + Condition: CreateProvisionedCluster + Properties: + Actions: + - IssueCertificate + - GetCertificate + - ListPermissions + CertificateAuthorityArn: !Ref MSKCertAuthority + Principal: acm.amazonaws.com + + CredentialsKMSKey: + Type: AWS::KMS::Key + Condition: CreateProvisionedCluster + Properties: + Description: "KMS key to use with credentials secret with KMS" + EnableKeyRotation: True + KeyPolicy: + Version: "2012-10-17" + Id: key-default-1 + Statement: + - Sid: Enable IAM User Permissions + Effect: Allow + Principal: + AWS: !Join + - '' + - - 'arn:aws:iam::' + - !Ref 'AWS::AccountId' + - ':root' + Action: 'kms:*' + Resource: '*' + - Sid: Enable Secret Manager Permissions + Effect: Allow + Principal: + AWS: "*" + Action: + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: '*' + Condition: + StringEquals: + kms:CallerAccount: !Ref 'AWS::AccountId' + kms:ViaService: !Join + - '' + - - 'secretsmanager.' 
+ - !Ref 'AWS::Region' + - '.amazonaws.com' + PendingWindowInDays: 7 + + CredentialsKMSKeyAlias: + Type: AWS::KMS::Alias + Condition: CreateProvisionedCluster + Properties: + AliasName: alias/mskstack_secret_manager_key + TargetKeyId: !Ref 'CredentialsKMSKey' + + CredentialsSecret: + Type: AWS::SecretsManager::Secret + Condition: CreateProvisionedCluster + Properties: + Description: "Secret to use for SCRAM Auth" + Name: "AmazonMSK_Credentials" + GenerateSecretString: + SecretStringTemplate: '{"username": "test-user"}' + GenerateStringKey: "password" + PasswordLength: 30 + ExcludeCharacters: '"@/\' + KmsKeyId: !Ref 'CredentialsKMSKey' + + MSKConfiguration: + Type: AWS::MSK::Configuration + Condition: CreateProvisionedCluster + Properties: + Description: "MSKConfiguration" + Name: "MSKConfiguration" + ServerProperties: | + auto.create.topics.enable=true + default.replication.factor=3 + min.insync.replicas=2 + num.io.threads=8 + num.network.threads=5 + num.partitions=1 + num.replica.fetchers=2 + replica.lag.time.max.ms=30000 + socket.receive.buffer.bytes=102400 + socket.request.max.bytes=104857600 + socket.send.buffer.bytes=102400 + unclean.leader.election.enable=true + zookeeper.session.timeout.ms=18000 + delete.topic.enable=true + log.retention.hours=8 + + MSKCluster: + Type: AWS::MSK::Cluster + Condition: CreateProvisionedCluster + Properties: + BrokerNodeGroupInfo: + ClientSubnets: + - !Ref PrivateSubnetMSKOne + - !Ref PrivateSubnetMSKTwo + - !Ref PrivateSubnetMSKThree + SecurityGroups: + - !GetAtt MSKSecurityGroup.GroupId + InstanceType: "kafka.m5.large" + StorageInfo: + EBSStorageInfo: + VolumeSize: 100 + ClientAuthentication: + Unauthenticated: + Enabled: False + Sasl: + Iam: + Enabled: True + Scram: + Enabled: True + Tls: + CertificateAuthorityArnList: + - !Ref MSKCertAuthority + Enabled: True + ClusterName: !Sub "${AWS::StackName}-cluster" + ConfigurationInfo: + Arn: !Ref MSKConfiguration + Revision: 1 + EncryptionInfo: + EncryptionInTransit: + ClientBroker: 
TLS + InCluster: True + KafkaVersion: !Ref MSKKafkaVersion + NumberOfBrokerNodes: 3 + + SecretMSKAssociation: + Type: AWS::MSK::BatchScramSecret + Condition: CreateProvisionedCluster + Properties: + ClusterArn: !Ref MSKCluster + SecretArnList: + - !Ref CredentialsSecret + + ServerlessMSKCluster: + Type: AWS::MSK::ServerlessCluster + Condition: CreateServerlessCluster + Properties: + ClientAuthentication: + Sasl: + Iam: + Enabled: True + ClusterName: !Sub "${AWS::StackName}-cluster" + VpcConfigs: + - SubnetIds: + - !Ref PrivateSubnetMSKOne + - !Ref PrivateSubnetMSKTwo + - !Ref PrivateSubnetMSKThree + SecurityGroups: + - !GetAtt MSKSecurityGroup.GroupId + +Outputs: + VPCId: + Description: The ID of the VPC created + Value: !Ref 'VPC' + Export: + Name: !Sub "${AWS::StackName}-VPCID" + PublicSubnetOne: + Description: The name of the public subnet created + Value: !Ref 'PublicSubnetOne' + Export: + Name: !Sub "${AWS::StackName}-PublicSubnetOne" + PrivateSubnetMSKOne: + Description: The ID of private subnet one created + Value: !Ref 'PrivateSubnetMSKOne' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKOne" + PrivateSubnetMSKTwo: + Description: The ID of private subnet two created + Value: !Ref 'PrivateSubnetMSKTwo' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKTwo" + PrivateSubnetMSKThree: + Description: The ID of private subnet three created + Value: !Ref 'PrivateSubnetMSKThree' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKThree" + VPCStackName: + Description: The name of the VPC Stack + Value: !Ref 'AWS::StackName' + Export: + Name: !Sub "${AWS::StackName}-VPCStackName" + MSKArn: + Description: Provisioned MSK Cluster ARN. + Value: !Ref MSKCluster + Export: + Name: !Sub "${AWS::StackName}-MSKArn" + Condition: "CreateProvisionedCluster" + CredentialsSecretArn: + Description: ARN for secret manager secret with credentials. 
+ Value: !Ref CredentialsSecret + Export: + Name: !Sub "${AWS::StackName}-CredentialsSecret" + Condition: "CreateProvisionedCluster" + ServerlessMSKArn: + Description: Serverless MSK Cluster ARN. + Value: !Ref ServerlessMSKCluster + Export: + Name: !Sub "${AWS::StackName}-Serverless" + Condition: "CreateServerlessCluster" + SecurityGroupId: + Description: ID of scurity group for MSK clients. + Value: !GetAtt MSKSecurityGroup.GroupId + Export: + Name: !Sub "${AWS::StackName}-SecurityGroupId" + EC2InstanceEndpointID: + Description: The ID of the EC2 Instance Endpoint + Value: !Ref EC2InstanceEndpoint + KafkaTopicForLambda: + Description: The Topic to use for the Java Lambda Function + Value: !Ref KafkaTopicForLambda + Export: + Name: !Sub "${AWS::StackName}-KafkaTopicForLambda" + \ No newline at end of file diff --git a/msk-lambda-iam-python-sam/template.yaml b/msk-lambda-iam-python-sam/template_original.yaml similarity index 97% rename from msk-lambda-iam-python-sam/template.yaml rename to msk-lambda-iam-python-sam/template_original.yaml index c84194185..14c658b06 100644 --- a/msk-lambda-iam-python-sam/template.yaml +++ b/msk-lambda-iam-python-sam/template_original.yaml @@ -71,12 +71,15 @@ Parameters: MSKClusterName: Type: String Description: Enter the name of the MSK Cluster + Default: CLUSTER_NAME MSKClusterId: Type: String Description: Enter the ID of the MSK Cluster + Default: CLUSTER_ID MSKTopic: Type: String Description: Enter the name of the MSK Topic + Default: KAFKA_TOPIC Outputs: LambdaMSKConsumerPythonFunction: Description: "Topic Consumer Lambda Function ARN" From 7cbb47bb4e32b6140e5f021e103d37a436ff5a5f Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Wed, 4 Dec 2024 14:02:58 -0800 Subject: [PATCH 02/11] Fixed Cloudformation Template and SAM template --- .../MSKAndKafkaClientEC2.yaml | 70 ++++-- msk-lambda-iam-python-sam/README 2.md | 211 ++++++++++++++++++ .../template_original.yaml | 2 +- 3 files changed, 263 insertions(+), 20 deletions(-) 
create mode 100644 msk-lambda-iam-python-sam/README 2.md diff --git a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml index 863def0bd..f5c7562ea 100644 --- a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml @@ -10,7 +10,20 @@ Parameters: ConstraintDescription: Must specify Serverless or Provisioned. LatestAmiId: Type: 'AWS::SSM::Parameter::Value' - Default: 'al2023-ami-kernel-default-x86_64' + Default: '/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-6.1-x86_64' + Python3Version: + Type: String + Description: Choose the version of Python 3 between 3.9 and 3.12. Note that in Amazon Linux 2023, 3.9 is installed by default and maximum allowed version is 3.12 + AllowedValues: + - python3.9 + - python3.10 + - python3.11 + - python3.12 + Default: python3.12 + EC2KeyPair: + Type: AWS::EC2::KeyPair::KeyName + Description: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-key-pairs.html + Default: my-key-pair MSKKafkaVersion: Type: String Default: 3.5.1 @@ -275,6 +288,7 @@ Resources: SubnetId: !Ref PublicSubnetOne SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] ImageId: !Ref LatestAmiId + KeyName: !Ref EC2KeyPair Tags: - Key: 'Name' Value: 'KafkaClientInstance' @@ -290,18 +304,24 @@ Resources: - | #!/bin/bash yum update -y - yum install java-openjdk11-devel -y + sudo yum install ec2-instance-connect + sudo yum install java-11-amazon-corretto-devel -y yum install nmap-ncat -y yum install git -y - yum erase awscli -y yum install jq -y - amazon-linux-extras install docker -y - sudo dnf install python3 - sudo pip install boto3 - service docker start - usermod -a -G docker ec2-user + + #install latest python3 + PYTHON3_VERSION=${python3_version} + sudo yum install $PYTHON3_VERSION -y + echo "export PYTHON3_VERSION=$PYTHON3_VERSION" >> /home/ec2-user/.bash_profile + + # install and start docker + sudo yum install -y docker + 
sudo service docker start + sudo usermod -a -G docker ec2-user # install AWS CLI 2 - access with aws2 + sudo yum remove awscli cd /home/ec2-user mkdir -p awscli cd awscli @@ -345,7 +365,7 @@ Resources: echo "#!/bin/bash" > kafka_topic_creator.sh sudo chmod +x kafka_topic_creator.sh echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh - AWS_REGION=$(curl -fsq http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/[a-z]$//') + AWS_REGION=${aws_region} echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh echo "sleep 5" >> kafka_topic_creator.sh @@ -376,6 +396,7 @@ Resources: sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + sed -i "s/PYTHON3_VERSION/$PYTHON3_VERSION/g" template.yaml # Get IP CIDR range for EC2 Instance Connect cd /home/ec2-user @@ -383,7 +404,7 @@ Resources: cd ip_prefix git clone https://github.com/joetek/aws-ip-ranges-json.git cd aws-ip-ranges-json - AWS_REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') + AWS_REGION=${aws_region} EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile SECURITY_GROUP=${security_group_id} @@ -396,6 +417,8 @@ Resources: msk_kafka_version: !Ref MSKKafkaVersion apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation serverless_land_github_location: !Ref ServerlessLandGithubLocation + python3_version: !Ref Python3Version + aws_region: !Ref 'AWS::Region' KafkaClientEC2InstanceServerless: Condition: CreateServerlessCluster @@ -426,18 +449,24 @@ 
Resources: - | #!/bin/bash yum update -y - yum install java-openjdk11-devel -y + sudo yum install ec2-instance-connect + sudo yum install java-11-amazon-corretto-devel -y yum install nmap-ncat -y yum install git -y - yum erase awscli -y yum install jq -y - amazon-linux-extras install docker -y - sudo dnf install python3 - sudo pip install boto3 - service docker start - usermod -a -G docker ec2-user + + #install latest python3 + PYTHON3_VERSION=${python3_version} + sudo yum install $PYTHON3_VERSION -y + echo "export PYTHON3_VERSION=$PYTHON3_VERSION" >> /home/ec2-user/.bash_profile + + # install and start docker + sudo yum install -y docker + sudo service docker start + sudo usermod -a -G docker ec2-user # install AWS CLI 2 - access with aws2 + sudo yum remove awscli cd /home/ec2-user mkdir -p awscli cd awscli @@ -481,7 +510,7 @@ Resources: echo "#!/bin/bash" > kafka_topic_creator.sh sudo chmod +x kafka_topic_creator.sh echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh - AWS_REGION=$(curl -fsq http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/[a-z]$//') + AWS_REGION=${aws_region} echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh echo "sleep 5" >> kafka_topic_creator.sh @@ -512,6 +541,7 @@ Resources: sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + sed -i "s/PYTHON3_VERSION/$PYTHON3_VERSION/g" template.yaml # Get IP CIDR range for EC2 Instance Connect cd /home/ec2-user @@ -519,7 +549,7 @@ Resources: cd ip_prefix git clone https://github.com/joetek/aws-ip-ranges-json.git cd aws-ip-ranges-json - AWS_REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') + 
AWS_REGION=${aws_region} EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile SECURITY_GROUP=${security_group_id} @@ -532,6 +562,8 @@ Resources: msk_kafka_version: !Ref MSKKafkaVersion apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation serverless_land_github_location: !Ref ServerlessLandGithubLocation + python3_version: !Ref Python3Version + aws_region: !Ref 'AWS::Region' EC2InstanceEndpoint: Type: AWS::EC2::InstanceConnectEndpoint diff --git a/msk-lambda-iam-python-sam/README 2.md b/msk-lambda-iam-python-sam/README 2.md new file mode 100644 index 000000000..c448e891d --- /dev/null +++ b/msk-lambda-iam-python-sam/README 2.md @@ -0,0 +1,211 @@ +# msk-lambda-iam-java-sam +# Java AWS Lambda Kafka consumer with IAM auth, using AWS SAM + +This pattern is an example of a Lambda function that consumes messages from an Amazon Managed Streaming for Kafka (Amazon MSK) topic, where the MSK Cluster has been configured to use IAM authentication. + +This project contains source code and supporting files for a serverless application that you can deploy with the SAM CLI. It includes the following files and folders. + +- kafka_event_consumer_function/src/main/java - Code for the application's Lambda function. +- events - Invocation events that you can use to invoke the function. +- kafka_event_consumer_function/src/test/java - Unit tests for the application code. +- template_original.yaml - A template that defines the application's Lambda function. +- MSKAndKafkaClientEC2.yaml - A Cloudformation template file that can be used to deploy an MSK cluster and also deploy an EC2 machine with all pre-requisities already installed, so you can directly build and deploy the lambda function and test it out. 
+ +Important: this application uses various AWS services and there are costs associated with these services after the Free Tier usage - please see the [AWS Pricing page](https://aws.amazon.com/pricing/) for details. You are responsible for any AWS costs incurred. No warranty is implied in this example. + +## Requirements + +* [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in. The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. + +## Run the Cloudformation template to create the MSK Cluster and Client EC2 machine + +* [Run the Cloudformation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS Cloudformation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the Cloudformation stack to be created. This Cloudformation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 machine that you can use as a client. + +* [Connect to the EC2 machine] - Once the Cloudformation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. +Note: You may need to wait for some time after the Cloudformation stack is created, as some UserData scripts continue running after the Cloudformation stack shows Created. + +* [Check if Kafka Topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamJavaLambdaTopic." 
+ +If you are not able to find the file kafka_topic_creator_output.txt or if it is blank or you see an error message, then you need to run the file ./kafka_topic_creator.sh. This file runs a script that goes and creates the Kafka topic that the Lambda function will subscribe to. + +## Pre-requisites to Deploy the sample Lambda function + +The EC2 machine that was created by running the Cloudformation template has all the software that will be needed to deploy the Lambda function. + +The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. + +* Java - On the EC2 machine, we have installed java-openjdk11-devel but you can also install Amazon Corretto by modifying the Cloudformation UserData script if you wish to (https://docs.aws.amazon.com/linux/al2/ug/java.html) +* Maven - On the EC2 machine, we have installed Maven (https://maven.apache.org/install.html) +* AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) + +We have also cloned the Github repository for serverless-patterns on the EC2 machine already by running the below command + ``` + git clone https://github.com/aws-samples/serverless-patterns.git + ``` +Change directory to the pattern directory: + ``` + cd serverless-patterns/msk-lambda-iam-java-sam + ``` + +## Use the SAM CLI to build and test locally + +Build your application with the `sam build` command. + +```bash +sam build +``` + +The SAM CLI installs dependencies defined in `kafka_event_consumer_function/pom.xml`, creates a deployment package, and saves it in the `.aws-sam/build` folder. 
+ +Test a single function by invoking it directly with a test event. An event is a JSON document that represents the input that the function receives from the event source. Test events are included in the `events` folder in this project. + +Run functions locally and invoke them with the `sam local invoke` command. + +```bash +sam local invoke --event events/event.json +``` + +You should see a response such as the below: + +``` +***** Begin sam local invoke response ***** + +Invoking com.amazonaws.services.lambda.samples.events.msk.HandlerMSK::handleRequest (java11) +Local image is up-to-date +Using local image: public.ecr.aws/lambda/java:11-rapid-x86_64. + +Mounting /home/ec2-user/serverless-patterns/msk-lambda-iam-java-sam/.aws-sam/build/LambdaMSKConsumerJavaFunction as /var/task:ro,delegated, inside +runtime container +START RequestId: 4484bb15-6749-4307-92d1-8ba2221e2218 Version: $LATEST +START RequestId: 4484bb15-6749-4307-92d1-8ba2221e2218 Version: $LATEST +Picked up JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1 +Received this message from Kafka - KafkaMessage [topic=myTopic, partition=0, timestamp=1678072110111, timestampType=CREATE_TIME, key=null, value=Zg==, decodedKey=null, decodedValue=f, headers=[]]Message in JSON format : { + "topic": "myTopic", + "partition": 0, + "offset": 250, + "timestamp": 1678072110111, + "timestampType": "CREATE_TIME", + "value": "Zg\u003d\u003d", + "decodedKey": "null", + "decodedValue": "f", + "headers": [] +}Received this message from Kafka - KafkaMessage [topic=myTopic, partition=0, timestamp=1678072111086, timestampType=CREATE_TIME, key=null, value=Zw==, decodedKey=null, decodedValue=g, headers=[]]Message in JSON format : { + "topic": "myTopic", + "partition": 0, + "offset": 251, + "timestamp": 1678072111086, + "timestampType": "CREATE_TIME", + "value": "Zw\u003d\u003d", + "decodedKey": "null", + "decodedValue": "g", + "headers": [] +}All Messages in this batch = [ + { + "topic": "myTopic", + "partition": 
0, + "offset": 250, + "timestamp": 1678072110111, + "timestampType": "CREATE_TIME", + "value": "Zg\u003d\u003d", + "decodedKey": "null", + "decodedValue": "f", + "headers": [] + }, + { + "topic": "myTopic", + "partition": 0, + "offset": 251, + "timestamp": 1678072111086, + "timestampType": "CREATE_TIME", + "value": "Zw\u003d\u003d", + "decodedKey": "null", + "decodedValue": "g", + "headers": [] + } +]END RequestId: fc96203d-e0c0-4c30-b332-d16708b25d3e +REPORT RequestId: fc96203d-e0c0-4c30-b332-d16708b25d3e Init Duration: 0.06 ms Duration: 474.31 ms Billed Duration: 475 ms Memory Size: 512 MB Max Memory Used: 512 MB +"200 OK" + +***** End sam local invoke response ***** +``` + + +## Deploy the sample application + + +To deploy your application for the first time, run the following in your shell: + +```bash +sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-java-sam --guided +``` + +The sam deploy command will package and deploy your application to AWS, with a series of prompts. You can accept all the defaults by hitting Enter: + +* **Stack Name**: The name of the stack to deploy to CloudFormation. This should be unique to your account and region, and a good starting point would be something matching your project name. +* **AWS Region**: The AWS region you want to deploy your app to. +* **Parameter MSKClusterName**: The name of the MSKCluster +* **Parameter MSKClusterId**: The unique ID of the MSKCluster +* **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on +* **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. +* **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. 
By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. +* **Disable rollback**: Defaults to No and it preserves the state of previously provisioned resources when an operation fails +* **Save arguments to configuration file**: If set to yes, your choices will be saved to a configuration file inside the project, so that in the future you can just re-run `sam deploy` without parameters to deploy changes to your application. +* **SAM configuration file [samconfig.toml]**: Name of the configuration file to store configuration information locally +* **SAM configuration environment [default]**: Environment for storing deployment information locally + +You should get a message "Successfully created/updated stack - in " if all goes well + + +## Test the sample application + +Once the lambda function is deployed, send some Kafka messages on the topic that the lambda function is listening on, on the MSK server. + +For your convenience, a script has been created on the EC2 machine that was provisioned using Cloudformation. + +cd /home/ec2-user + +You should see a script called kafka_message_sender.sh. 
Run that script and you should be able to send a new Kafka message in every line as shown below + +``` +[ec2-user@ip-10-0-0-126 ~]$ sh kafka_message_sender.sh +>My first message +>My second message +>My third message +>My fourth message +>My fifth message +>My sixth message +>My seventh message +>My eigth message +>My ninth message +>My tenth message +>Ctrl-C +``` + +Either send at least 10 messages or wait for 300 seconds (check the values of BatchSize: 10 and MaximumBatchingWindowInSeconds: 300 in the template.yaml file) + +Then check Cloudwatch logs and you should see messages for the Cloudwatch Log Group with the name of the deployed Lambda function. + +The lambda code parses the Kafka messages and outputs the fields in the Kafka messages to Cloudwatch logs + +A single lambda function receives a batch of messages. The messages are received as a map with each key being a combination of the topic and the partition, as a single batch can receive messages from multiple partitions. + +Each key has a list of messages. Each Kafka message has the following properties - Topic, Partition, Offset, TimeStamp, TimeStampType, Key and Value + +The Key and Value are base64 encoded and have to be decoded. A message can also have a list of headers, each header having a key and a value. + +The code in this example prints out the fields in the Kafka message and also decrypts the key and the value and logs them in Cloudwatch logs. + +## Cleanup + +You can first clean-up the Lambda function by running the sam delete command + +``` +cd /home/ec2-user/serverless-patterns/msk-lambda-iam-java-sam +sam delete + +``` +confirm by pressing y for both the questions +You should see the lambda function getting deleted and a final confirmation "Deleted successfully" on the command-line + +Next you need to delete the Cloudformation template that created the MSK Server and the EC2 machine by going to the Cloudformation console and selecting the stack and then hitting the "Delete" button. 
It will run for sometime but eventually you should see the stack getting cleaned up. \ No newline at end of file diff --git a/msk-lambda-iam-python-sam/template_original.yaml b/msk-lambda-iam-python-sam/template_original.yaml index 14c658b06..01ef6df0d 100644 --- a/msk-lambda-iam-python-sam/template_original.yaml +++ b/msk-lambda-iam-python-sam/template_original.yaml @@ -17,7 +17,7 @@ Resources: Properties: CodeUri: HandlerKafka/ Handler: app.lambda_handler - Runtime: python3.9 + Runtime: PYTHON3_VERSION Architectures: - x86_64 Events: From 2313fb07b27fec44120fde3f3aad182c70fc3630 Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Wed, 4 Dec 2024 17:26:50 -0800 Subject: [PATCH 03/11] Readme.md --- .../MSKAndKafkaClientEC2.yaml | 5 - msk-lambda-iam-python-sam/README 2.md | 211 ------------------ msk-lambda-iam-python-sam/README.md | 186 +++++++++------ 3 files changed, 119 insertions(+), 283 deletions(-) delete mode 100644 msk-lambda-iam-python-sam/README 2.md diff --git a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml index f5c7562ea..343eb6760 100644 --- a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml @@ -20,10 +20,6 @@ Parameters: - python3.11 - python3.12 Default: python3.12 - EC2KeyPair: - Type: AWS::EC2::KeyPair::KeyName - Description: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-key-pairs.html - Default: my-key-pair MSKKafkaVersion: Type: String Default: 3.5.1 @@ -288,7 +284,6 @@ Resources: SubnetId: !Ref PublicSubnetOne SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] ImageId: !Ref LatestAmiId - KeyName: !Ref EC2KeyPair Tags: - Key: 'Name' Value: 'KafkaClientInstance' diff --git a/msk-lambda-iam-python-sam/README 2.md b/msk-lambda-iam-python-sam/README 2.md deleted file mode 100644 index c448e891d..000000000 --- a/msk-lambda-iam-python-sam/README 2.md +++ /dev/null @@ -1,211 +0,0 @@ -# 
msk-lambda-iam-java-sam -# Java AWS Lambda Kafka consumer with IAM auth, using AWS SAM - -This pattern is an example of a Lambda function that consumes messages from an Amazon Managed Streaming for Kafka (Amazon MSK) topic, where the MSK Cluster has been configured to use IAM authentication. - -This project contains source code and supporting files for a serverless application that you can deploy with the SAM CLI. It includes the following files and folders. - -- kafka_event_consumer_function/src/main/java - Code for the application's Lambda function. -- events - Invocation events that you can use to invoke the function. -- kafka_event_consumer_function/src/test/java - Unit tests for the application code. -- template_original.yaml - A template that defines the application's Lambda function. -- MSKAndKafkaClientEC2.yaml - A Cloudformation template file that can be used to deploy an MSK cluster and also deploy an EC2 machine with all pre-requisities already installed, so you can directly build and deploy the lambda function and test it out. - -Important: this application uses various AWS services and there are costs associated with these services after the Free Tier usage - please see the [AWS Pricing page](https://aws.amazon.com/pricing/) for details. You are responsible for any AWS costs incurred. No warranty is implied in this example. - -## Requirements - -* [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in. The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. - -## Run the Cloudformation template to create the MSK Cluster and Client EC2 machine - -* [Run the Cloudformation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS Cloudformation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. 
Wait for the Cloudformation stack to be created. This Cloudformation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 machine that you can use as a client. - -* [Connect to the EC2 machine] - Once the Cloudformation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. -Note: You may need to wait for some time after the Cloudformation stack is created, as some UserData scripts continue running after the Cloudformation stack shows Created. - -* [Check if Kafka Topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamJavaLambdaTopic." - -If you are not able to find the file kafka_topic_creator_output.txt or if it is blank or you see an error message, then you need to run the file ./kafka_topic_creator.sh. This file runs a script that goes and creates the Kafka topic that the Lambda function will subscribe to. - -## Pre-requisites to Deploy the sample Lambda function - -The EC2 machine that was created by running the Cloudformation template has all the software that will be needed to deploy the Lambda function. - -The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. 
- -* Java - On the EC2 machine, we have installed java-openjdk11-devel but you can also install Amazon Corretto by modifying the Cloudformation UserData script if you wish to (https://docs.aws.amazon.com/linux/al2/ug/java.html) -* Maven - On the EC2 machine, we have installed Maven (https://maven.apache.org/install.html) -* AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) -* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) - -We have also cloned the Github repository for serverless-patterns on the EC2 machine already by running the below command - ``` - git clone https://github.com/aws-samples/serverless-patterns.git - ``` -Change directory to the pattern directory: - ``` - cd serverless-patterns/msk-lambda-iam-java-sam - ``` - -## Use the SAM CLI to build and test locally - -Build your application with the `sam build` command. - -```bash -sam build -``` - -The SAM CLI installs dependencies defined in `kafka_event_consumer_function/pom.xml`, creates a deployment package, and saves it in the `.aws-sam/build` folder. - -Test a single function by invoking it directly with a test event. An event is a JSON document that represents the input that the function receives from the event source. Test events are included in the `events` folder in this project. - -Run functions locally and invoke them with the `sam local invoke` command. - -```bash -sam local invoke --event events/event.json -``` - -You should see a response such as the below: - -``` -***** Begin sam local invoke response ***** - -Invoking com.amazonaws.services.lambda.samples.events.msk.HandlerMSK::handleRequest (java11) -Local image is up-to-date -Using local image: public.ecr.aws/lambda/java:11-rapid-x86_64. 
- -Mounting /home/ec2-user/serverless-patterns/msk-lambda-iam-java-sam/.aws-sam/build/LambdaMSKConsumerJavaFunction as /var/task:ro,delegated, inside -runtime container -START RequestId: 4484bb15-6749-4307-92d1-8ba2221e2218 Version: $LATEST -START RequestId: 4484bb15-6749-4307-92d1-8ba2221e2218 Version: $LATEST -Picked up JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -Received this message from Kafka - KafkaMessage [topic=myTopic, partition=0, timestamp=1678072110111, timestampType=CREATE_TIME, key=null, value=Zg==, decodedKey=null, decodedValue=f, headers=[]]Message in JSON format : { - "topic": "myTopic", - "partition": 0, - "offset": 250, - "timestamp": 1678072110111, - "timestampType": "CREATE_TIME", - "value": "Zg\u003d\u003d", - "decodedKey": "null", - "decodedValue": "f", - "headers": [] -}Received this message from Kafka - KafkaMessage [topic=myTopic, partition=0, timestamp=1678072111086, timestampType=CREATE_TIME, key=null, value=Zw==, decodedKey=null, decodedValue=g, headers=[]]Message in JSON format : { - "topic": "myTopic", - "partition": 0, - "offset": 251, - "timestamp": 1678072111086, - "timestampType": "CREATE_TIME", - "value": "Zw\u003d\u003d", - "decodedKey": "null", - "decodedValue": "g", - "headers": [] -}All Messages in this batch = [ - { - "topic": "myTopic", - "partition": 0, - "offset": 250, - "timestamp": 1678072110111, - "timestampType": "CREATE_TIME", - "value": "Zg\u003d\u003d", - "decodedKey": "null", - "decodedValue": "f", - "headers": [] - }, - { - "topic": "myTopic", - "partition": 0, - "offset": 251, - "timestamp": 1678072111086, - "timestampType": "CREATE_TIME", - "value": "Zw\u003d\u003d", - "decodedKey": "null", - "decodedValue": "g", - "headers": [] - } -]END RequestId: fc96203d-e0c0-4c30-b332-d16708b25d3e -REPORT RequestId: fc96203d-e0c0-4c30-b332-d16708b25d3e Init Duration: 0.06 ms Duration: 474.31 ms Billed Duration: 475 ms Memory Size: 512 MB Max Memory Used: 512 MB -"200 OK" - -***** End sam local invoke 
response ***** -``` - - -## Deploy the sample application - - -To deploy your application for the first time, run the following in your shell: - -```bash -sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-java-sam --guided -``` - -The sam deploy command will package and deploy your application to AWS, with a series of prompts. You can accept all the defaults by hitting Enter: - -* **Stack Name**: The name of the stack to deploy to CloudFormation. This should be unique to your account and region, and a good starting point would be something matching your project name. -* **AWS Region**: The AWS region you want to deploy your app to. -* **Parameter MSKClusterName**: The name of the MSKCluster -* **Parameter MSKClusterId**: The unique ID of the MSKCluster -* **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on -* **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. -* **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. 
-* **Disable rollback**: Defaults to No and it preserves the state of previously provisioned resources when an operation fails -* **Save arguments to configuration file**: If set to yes, your choices will be saved to a configuration file inside the project, so that in the future you can just re-run `sam deploy` without parameters to deploy changes to your application. -* **SAM configuration file [samconfig.toml]**: Name of the configuration file to store configuration information locally -* **SAM configuration environment [default]**: Environment for storing deployment information locally - -You should get a message "Successfully created/updated stack - in " if all goes well - - -## Test the sample application - -Once the lambda function is deployed, send some Kafka messages on the topic that the lambda function is listening on, on the MSK server. - -For your convenience, a script has been created on the EC2 machine that was provisioned using Cloudformation. - -cd /home/ec2-user - -You should see a script called kafka_message_sender.sh. Run that script and you should be able to send a new Kafka message in every line as shown below - -``` -[ec2-user@ip-10-0-0-126 ~]$ sh kafka_message_sender.sh ->My first message ->My second message ->My third message ->My fourth message ->My fifth message ->My sixth message ->My seventh message ->My eigth message ->My ninth message ->My tenth message ->Ctrl-C -``` - -Either send at least 10 messages or wait for 300 seconds (check the values of BatchSize: 10 and MaximumBatchingWindowInSeconds: 300 in the template.yaml file) - -Then check Cloudwatch logs and you should see messages for the Cloudwatch Log Group with the name of the deployed Lambda function. - -The lambda code parses the Kafka messages and outputs the fields in the Kafka messages to Cloudwatch logs - -A single lambda function receives a batch of messages. 
The messages are received as a map with each key being a combination of the topic and the partition, as a single batch can receive messages from multiple partitions. - -Each key has a list of messages. Each Kafka message has the following properties - Topic, Partition, Offset, TimeStamp, TimeStampType, Key and Value - -The Key and Value are base64 encoded and have to be decoded. A message can also have a list of headers, each header having a key and a value. - -The code in this example prints out the fields in the Kafka message and also decrypts the key and the value and logs them in Cloudwatch logs. - -## Cleanup - -You can first clean-up the Lambda function by running the sam delete command - -``` -cd /home/ec2-user/serverless-patterns/msk-lambda-iam-java-sam -sam delete - -``` -confirm by pressing y for both the questions -You should see the lambda function getting deleted and a final confirmation "Deleted successfully" on the command-line - -Next you need to delete the Cloudformation template that created the MSK Server and the EC2 machine by going to the Cloudformation console and selecting the stack and then hitting the "Delete" button. It will run for sometime but eventually you should see the stack getting cleaned up. \ No newline at end of file diff --git a/msk-lambda-iam-python-sam/README.md b/msk-lambda-iam-python-sam/README.md index 08a65c7ba..63a20fd7a 100644 --- a/msk-lambda-iam-python-sam/README.md +++ b/msk-lambda-iam-python-sam/README.md @@ -1,99 +1,72 @@ # Python AWS Lambda Kafka consumer with IAM auth, using AWS SAM -This pattern is an example of a Lambda function that consumes messages from an Amazon Managed Streaming for Kafka (Amazon MSK) topic, where the MSK Cluster has been configured to use IAM authentication. 
This pattern assumes you already have an MSK cluster with a topic configured, if you need a sample pattern to deploy an MSK cluster either in Provisioned or Serverless modes please see the [msk-cfn-sasl-lambda pattern](https://serverlessland.com/patterns/msk-cfn-sasl-lambda). +This pattern is an example of a Lambda function that consumes messages from an Amazon Managed Streaming for Kafka (Amazon MSK) topic, where the MSK Cluster has been configured to use IAM authentication. This project contains source code and supporting files for a serverless application that you can deploy with the AWS Serverless Application Model (AWS SAM) CLI. It includes the following files and folders. - HandlerKafka - Code for the application's Lambda function. - events - Invocation events that you can use to invoke the function. -- template.yaml - An AWS SAM template that defines the application's AWS resources. +- template_original.yaml - An AWS SAM template that defines the application's AWS resources. +- MSKAndKafkaClientEC2.yaml - A Cloudformation template file that can be used to deploy an MSK cluster and also deploy an EC2 machine with all pre-requisities already installed, so you can directly build and deploy the lambda function and test it out. -The application creates a Lambda function that listens to Kafka messages on a topic of an MSK Cluster. These resources are defined in the `template.yaml` file in this project. You can update the template to add AWS resources through the same deployment process that updates your application code. +The application creates a Lambda function that listens to Kafka messages on a topic of an MSK Cluster. These resources are defined in the `template_original.yaml` file in this project. You can update the template to add AWS resources through the same deployment process that updates your application code. 
Important: this application uses various AWS services and there are costs associated with these services after the Free Tier usage - please see the [AWS Pricing page](https://aws.amazon.com/pricing/) for details. You are responsible for any AWS costs incurred. No warranty is implied in this example. ## Requirements * [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in. The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. -* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) installed and configured -* [Git installed](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) -* [AWS Serverless Application Model](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) (AWS SAM) installed -* Create MSK cluster and topic that will be used for testing. It is important to create the topic before deploying the Lambda function, otherwise the event source mapping will stay disabled. -## Deploy the sample application - -The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. - -To use the AWS SAM CLI, you need the following tools. - -* AWS SAM CLI - [Install the AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) -* Docker - [Install Docker community edition](https://hub.docker.com/search/?type=edition&offering=community) - -1. Create a new directory, navigate to that directory in a terminal and clone the GitHub repository: - ``` - git clone https://github.com/aws-samples/serverless-patterns.git - ``` -1. 
Change directory to the pattern directory: - ``` - cd msk-lambda-iam-python-sam - ``` - -1. From the command line, use AWS SAM to deploy the AWS resources for the pattern as specified in the template.yml file: - ``` - sam build - sam deploy --guided - ``` +## Run the Cloudformation template to create the MSK Cluster and Client EC2 machine -1. During the prompts: -* **Stack Name**: The name of the stack to deploy to CloudFormation. This should be unique to your account and region, and a good starting point would be something matching your project name. -* **AWS Region**: The AWS region you want to deploy your app to. -* **Parameter MSKClusterName**: The name of the MSKCluster, eg. msk-test-cluster +* [Run the Cloudformation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS Cloudformation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the Cloudformation stack to be created. This Cloudformation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 machine that you can use as a client. -* **Parameter MSKClusterId**: The unique ID of the MSKCluster, eg. a4e132c8-6ad0-4334-a313-123456789012-s2 -* **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on -* **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. -* **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. 
If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. -* **Disable rollback**: Defaults to No and it preserves the state of previously provisioned resources when an operation fails -* **Save arguments to configuration file**: If set to yes, your choices will be saved to a configuration file inside the project, so that in the future you can just re-run `sam deploy` without parameters to deploy changes to your application. -* **SAM configuration file [samconfig.toml]**: Name of the configuration file to store configuration information locally -* **SAM configuration environment [default]**: Environment for storing deployment information locally +* [Connect to the EC2 machine] - Once the Cloudformation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. +Note: You may need to wait for some time after the Cloudformation stack is created, as some UserData scripts continue running after the Cloudformation stack shows Created. -You should get a message "Successfully created/updated stack - in " if all goes well. +* [Check if Kafka Topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamPythonLambdaTopic." -Once you have run `sam deploy --guided` mode once and saved arguments to a configuration file (samconfig.toml), you can use `sam deploy` in future to use these defaults. +If you are not able to find the file kafka_topic_creator_output.txt or if it is blank or you see an error message, then you need to run the file ./kafka_topic_creator.sh. 
This file runs a script that goes and creates the Kafka topic that the Lambda function will subscribe to. -## How it works +## Pre-requisites to Deploy the sample Lambda function -This pattern creates a Lambda function along with a Lambda Event Source Mapping(ESM) resource. This maps a Kafka topic on an MSK Cluster as a trigger to a Lambda function. The ESM takes care of polling the Kafka topic and then invokes the Lambda function with a batch of messages. +The EC2 machine that was created by running the Cloudformation template has all the software that will be needed to deploy the Lambda function. -## Test the sample application - -Once the Lambda function is deployed, send some Kafka messages to the topic that you configured in the Lambda function trigger. - -Either send at least 10 messages or wait for 300 seconds (check the values of BatchSize: 10 and MaximumBatchingWindowInSeconds: 300 in the template.yaml file) - -Then check Amazon CloudWatch logs and you should see messages in the CloudWatch Log Group with the name of the deployed Lambda function. - -The Lambda code parses the Kafka messages and outputs the fields in the Kafka messages to CloudWatch logs. +The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. -A single Lambda function receives a batch of messages. The messages are received as a map with each key being a combination of the topic and the partition, as a single batch can receive messages from multiple partitions. 
+* Python3 - We have installed the version of Python3 that you picked up at the time of specifying the input parameters to the Cloudformation template +* AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) -Each key has a list of messages. Each Kafka message has the following properties - `Topic`, `Partition`, `Offset`, `TimeStamp`, `TimeStampType`, `Key`, and `Value`. +We have also cloned the Github repository for serverless-patterns on the EC2 machine already by running the below command + ``` + git clone https://github.com/aws-samples/serverless-patterns.git + ``` +Change directory to the pattern directory: + ``` + cd serverless-patterns/msk-lambda-iam-python-sam + ``` +## Use the SAM CLI to build and test locally -The `Key` and `Value` are base64 encoded and have to be decoded. A message can also have a list of headers, each header having a key and a value. +Build your application with the `sam build` command. -The code in this example prints out the fields in the Kafka message and also decrypts the key and the value and logs them to CloudWatch logs. +```bash +sam build +``` +The SAM CLI creates a deployment package, and saves it in the `.aws-sam/build` folder. -### Local development +Test a single function by invoking it directly with a test event. An event is a JSON document that represents the input that the function receives from the event source. Test events are included in the `events` folder in this project. -**You can invoke the function locally using `sam local`** +Run functions locally and invoke them with the `sam local invoke` command. 
```bash -sam local invoke --event=events/event.json +sam local invoke --event events/event.json ``` -You should see a response similar to the below +You should see a response such as the below: +``` +***** Begin sam local invoke response ***** `START RequestId: 5c10310a-abf9-416e-b017-697d2c3ba097 Version: $LATEST Received an event: {'eventSource': 'aws:kafka', 'eventSourceArn': 'arn:aws:kafka:us-west-2:123456789012:cluster/MSKWorkshopCluster/a93759a9-c9d0-4952-984c-492c6bfa2be8-13', 'bootstrapServers': 'b-1.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098,b-3.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098,b-2.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098', 'records': {'myTopic-0': [{'topic': 'myTopic', 'partition': 0, 'offset': 383, 'timestamp': 1678484822068, 'timestampType': 'CREATE_TIME', 'value': 'bTE=', 'headers': []}, {'topic': 'myTopic', 'partition': 0, 'offset': 384, 'timestamp': 1678484823448, 'timestampType': 'CREATE_TIME', 'value': 'bTI=', 'headers': []}, {'topic': 'myTopic', 'partition': 0, 'offset': 385, 'timestamp': 1678484824763, 'timestampType': 'CREATE_TIME', 'value': 'bTM=', 'headers': []}, {'topic': 'myTopic', 'partition': 0, 'offset': 386, 'timestamp': 1678484825902, 'timestampType': 'CREATE_TIME', 'value': 'bTQ=', 'headers': []}, {'topic': 'myTopic', 'partition': 0, 'offset': 387, 'timestamp': 1678484827810, 'timestampType': 'CREATE_TIME', 'value': 'bTU=', 'headers': []}]}} @@ -165,12 +138,91 @@ Now finished printing details of record number: 5 END RequestId: 5c10310a-abf9-416e-b017-697d2c3ba097 REPORT RequestId: 5c10310a-abf9-416e-b017-697d2c3ba097 Init Duration: 6.68 ms Duration: 1502.83 ms Billed Duration: 1503 ms Memory Size: 128 MB Max Memory Used: 128 MB +***** End sam local invoke response ***** +``` + +## Deploy the sample application + + +To deploy your application for the first time, run the following in your shell: + +```bash +sam deploy --capabilities CAPABILITY_IAM 
--no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-python-sam --guided +``` + +The sam deploy command will package and deploy your application to AWS, with a series of prompts. You can accept all the defaults by hitting Enter: + +* **Stack Name**: The name of the stack to deploy to CloudFormation. This should be unique to your account and region, and a good starting point would be something matching your project name. +* **AWS Region**: The AWS region you want to deploy your app to. +* **Parameter MSKClusterName**: The name of the MSKCluster +* **Parameter MSKClusterId**: The unique ID of the MSKCluster +* **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on +* **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. +* **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. +* **Disable rollback**: Defaults to No and it preserves the state of previously provisioned resources when an operation fails +* **Save arguments to configuration file**: If set to yes, your choices will be saved to a configuration file inside the project, so that in the future you can just re-run `sam deploy` without parameters to deploy changes to your application. 
+* **SAM configuration file [samconfig.toml]**: Name of the configuration file to store configuration information locally +* **SAM configuration environment [default]**: Environment for storing deployment information locally + +You should get a message "Successfully created/updated stack - in " if all goes well + +Once you have run `sam deploy --guided` mode once and saved arguments to a configuration file (samconfig.toml), you can use `sam deploy` in future to use these defaults. + +## How it works + +This pattern creates a Lambda function along with a Lambda Event Source Mapping(ESM) resource. This maps a Kafka topic on an MSK Cluster as a trigger to a Lambda function. The ESM takes care of polling the Kafka topic and then invokes the Lambda function with a batch of messages. + +## Test the sample application + +Once the lambda function is deployed, send some Kafka messages on the topic that the lambda function is listening on, on the MSK server. + +For your convenience, a script has been created on the EC2 machine that was provisioned using Cloudformation. + +cd /home/ec2-user + +You should see a script called kafka_message_sender.sh. Run that script and you should be able to send a new Kafka message in every line as shown below + +``` +[ec2-user@ip-10-0-0-126 ~]$ sh kafka_message_sender.sh +>My first message +>My second message +>My third message +>My fourth message +>My fifth message +>My sixth message +>My seventh message +>My eigth message +>My ninth message +>My tenth message +>Ctrl-C +``` +Either send at least 10 messages or wait for 300 seconds (check the values of BatchSize: 10 and MaximumBatchingWindowInSeconds: 300 in the template.yaml file) + +Then check Amazon CloudWatch logs and you should see messages in the CloudWatch Log Group with the name of the deployed Lambda function. + +The Lambda code parses the Kafka messages and outputs the fields in the Kafka messages to CloudWatch logs. + +A single Lambda function receives a batch of messages. 
The messages are received as a map with each key being a combination of the topic and the partition, as a single batch can receive messages from multiple partitions. + +Each key has a list of messages. Each Kafka message has the following properties - `Topic`, `Partition`, `Offset`, `TimeStamp`, `TimeStampType`, `Key`, and `Value`. + +The `Key` and `Value` are base64 encoded and have to be decoded. A message can also have a list of headers, each header having a key and a value. + +The code in this example prints out the fields in the Kafka message and also decrypts the key and the value and logs them to CloudWatch logs. + ## Cleanup -1. Delete the stack - ```bash - sam delete - ``` +You can first clean-up the Lambda function by running the sam delete command + +``` +cd /home/ec2-user/serverless-patterns/msk-lambda-iam-python-sam +sam delete + +``` +confirm by pressing y for both the questions +You should see the lambda function getting deleted and a final confirmation "Deleted successfully" on the command-line + +Next you need to delete the Cloudformation template that created the MSK Server and the EC2 machine by going to the Cloudformation console and selecting the stack and then hitting the "Delete" button. It will run for sometime but eventually you should see the stack getting cleaned up. ---- Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
From baace9ae4959c034d9e188d8b9f07d825fd16455 Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Wed, 4 Dec 2024 17:28:38 -0800 Subject: [PATCH 04/11] Fixed default topic nname in MSKAndKafkaClientEC2.yaml --- msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml index 343eb6760..e264d9064 100644 --- a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml @@ -28,7 +28,7 @@ Parameters: Default: https://archive.apache.org/dist/kafka/3.5.1/kafka_2.13-3.5.1.tgz KafkaTopicForLambda: Type: String - Default: MskIamJavaLambdaTopic + Default: MskIamPythonLambdaTopic ServerlessLandGithubLocation: Type: String Default: https://github.com/aws-samples/serverless-patterns/ From b022fb92dce014e22b922ea816fef0ebf24c443f Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Wed, 4 Dec 2024 19:59:32 -0800 Subject: [PATCH 05/11] Fixed Readme.md --- msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml index e264d9064..cf056881e 100644 --- a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml @@ -552,7 +552,7 @@ Resources: aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId - msk_cluster_arn : !GetAtt MSKCluster.Arn + msk_cluster_arn : !GetAtt ServerlessMSKCluster.Arn kafka_topic_for_lambda : !Ref KafkaTopicForLambda msk_kafka_version: !Ref MSKKafkaVersion apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation From cb30a2f56653496f1ecff92638f0041b736ab886 Mon 
Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Thu, 5 Dec 2024 17:18:08 -0800 Subject: [PATCH 06/11] Fixed Node.js MSK Lambda example --- .../MSKAndKafkaClientEC2.yaml | 995 ++++++++++++++++++ .../{template.yaml => template_original.yaml} | 5 +- 2 files changed, 999 insertions(+), 1 deletion(-) create mode 100644 msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml rename msk-lambda-iam-node-sam/{template.yaml => template_original.yaml} (96%) diff --git a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml new file mode 100644 index 000000000..5e00cb1d4 --- /dev/null +++ b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml @@ -0,0 +1,995 @@ +AWSTemplateFormatVersion: '2010-09-09' +Parameters: + EnvType: + Description: MSK Cluster Type. + Default: Provisioned + Type: String + AllowedValues: + - Serverless + - Provisioned + ConstraintDescription: Must specify Serverless or Provisioned. + LatestAmiId: + Type: 'AWS::SSM::Parameter::Value' + Default: '/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-6.1-x86_64' + NodejsVersion: + Type: String + Description: Choose the version of Nodejs version 18, 20 or 22. 
+ AllowedValues: + - nodejs18.x + - nodejs20.x + - nodejs22.x + Default: nodejs18.x + MSKKafkaVersion: + Type: String + Default: 3.5.1 + ApacheKafkaInstallerLocation: + Type: String + Default: https://archive.apache.org/dist/kafka/3.5.1/kafka_2.13-3.5.1.tgz + KafkaTopicForLambda: + Type: String + Default: MskIamPythonLambdaTopic + ServerlessLandGithubLocation: + Type: String + Default: https://github.com/aws-samples/serverless-patterns/ +Conditions: + CreateProvisionedCluster: !Equals + - !Ref EnvType + - Provisioned + CreateServerlessCluster: !Equals + - !Ref EnvType + - Serverless +Mappings: + SubnetConfig: + VPC: + CIDR: '10.0.0.0/16' + PublicOne: + CIDR: '10.0.0.0/24' + PrivateSubnetMSKOne: + CIDR: '10.0.1.0/24' + PrivateSubnetMSKTwo: + CIDR: '10.0.2.0/24' + PrivateSubnetMSKThree: + CIDR: '10.0.3.0/24' +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + EnableDnsSupport: true + EnableDnsHostnames: true + CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] + Tags: + - Key: 'Name' + Value: 'MSKVPC' + + PublicSubnetOne: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR'] + MapPublicIpOnLaunch: true + Tags: + - Key: 'Name' + Value: 'PublicSubnet' + PrivateSubnetMSKOne: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKOne', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKOne' + PrivateSubnetMSKTwo: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 1 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKTwo', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKTwo' + PrivateSubnetMSKThree: + Type: AWS::EC2::Subnet + 
Properties: + AvailabilityZone: + Fn::Select: + - 2 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKThree', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKThree' + + InternetGateway: + Type: AWS::EC2::InternetGateway + GatewayAttachement: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref 'VPC' + InternetGatewayId: !Ref 'InternetGateway' + + NATEIP: + Type: AWS::EC2::EIP + DependsOn: GatewayAttachement + Properties: + Domain: vpc + + NATGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt NATEIP.AllocationId + SubnetId: !Ref 'PublicSubnetOne' + Tags: + - Key: 'Name' + Value: 'ConfluentKafkaNATGateway' + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref 'VPC' + PublicRoute: + Type: AWS::EC2::Route + DependsOn: GatewayAttachement + Properties: + RouteTableId: !Ref 'PublicRouteTable' + DestinationCidrBlock: '0.0.0.0/0' + GatewayId: !Ref 'InternetGateway' + PublicSubnetOneRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnetOne + RouteTableId: !Ref PublicRouteTable + + PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref 'VPC' + + PrivateRoute: + Type: AWS::EC2::Route + DependsOn: NATGateway + Properties: + RouteTableId: !Ref 'PrivateRouteTable' + DestinationCidrBlock: '0.0.0.0/0' + NatGatewayId: !Ref 'NATGateway' + + PrivateSubnetMSKOneRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKOne + PrivateSubnetMSKTwoRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKTwo + PrivateSubnetMSKThreeRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + 
SubnetId: !Ref PrivateSubnetMSKThree + + KafkaClientInstanceSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Enable SSH access via port 22 from BastionHostSecurityGroup + GroupName: !Sub "${AWS::StackName} Security group attached to the kakfa client producer" + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3500 + ToPort: 3500 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3600 + ToPort: 3600 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3800 + ToPort: 3800 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3900 + ToPort: 3900 + CidrIp: 10.0.0.0/24 + + MSKSecurityGroup: + Type: AWS::EC2::SecurityGroup + DependsOn: [VPC,KafkaClientInstanceSecurityGroup] + Properties: + GroupDescription: MSK Security Group + GroupName: !Sub "${AWS::StackName} Security group for the MSK cluster" + VpcId: !Ref 'VPC' + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 2181 + ToPort: 2181 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9094 + ToPort: 9094 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9096 + ToPort: 9096 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9092 + ToPort: 9092 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9098 + ToPort: 9098 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 8083 + ToPort: 8083 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 8081 + ToPort: 8081 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + + MSKSelfIngressAllowRule: + Type: AWS::EC2::SecurityGroupIngress + DependsOn: MSKSecurityGroup + Properties: + GroupId: !GetAtt 
MSKSecurityGroup.GroupId + Description: Enable Self referencing Bootstrap servers + IpProtocol: tcp + FromPort: 9092 + ToPort: 9098 + SourceSecurityGroupId: !GetAtt MSKSecurityGroup.GroupId + + KafkaClientSelfIngressAllowRule: + Type: AWS::EC2::SecurityGroupIngress + DependsOn: KafkaClientInstanceSecurityGroup + Properties: + GroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + IpProtocol: tcp + FromPort: 22 + ToPort: 22 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + + KafkaClientEC2InstanceProvisioned: + Condition: CreateProvisionedCluster + DependsOn: MSKCluster + Type: AWS::EC2::Instance + Properties: + InstanceType: m5.large + IamInstanceProfile: !Ref EC2InstanceProfile + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + SubnetId: !Ref PublicSubnetOne + SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] + ImageId: !Ref LatestAmiId + Tags: + - Key: 'Name' + Value: 'KafkaClientInstance' + BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 50 + VolumeType: gp2 + DeleteOnTermination: true + UserData: + Fn::Base64: + !Sub + - | + #!/bin/bash + yum update -y + sudo yum install ec2-instance-connect + sudo yum install java-11-amazon-corretto-devel -y + yum install nmap-ncat -y + yum install git -y + yum install jq -y + + #install node.js + NODEJS_VERSION=${nodejs_version} + if ["$NODEJS_VERSION" == "nodejs18.x"] + sudo dnf install nodejs + elif ["$NODEJS_VERSION" == "nodejs20.x"] + sudo dnf install nodejs20 + elif ["$NODEJS_VERSION" == "nodejs22.x"] + sudo dnf install nodejs20 + else + sudo dnf install nodejs + fi + echo "export NODEJS_VERSION=$NODEJS_VERSION" >> /home/ec2-user/.bash_profile + + + # install and start docker + sudo yum install -y docker + sudo service docker start + sudo usermod -a -G docker ec2-user + + # install AWS CLI 2 - access with aws2 + sudo yum remove awscli + cd /home/ec2-user + mkdir -p awscli + cd awscli + curl 
"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + # Create dirs, get Apache Kafka and unpack it + cd /home/ec2-user + KAFKA_VERSION=${msk_kafka_version} + KAFKA_FOLDER_VERSION=$(echo "$KAFKA_VERSION" | tr -d '.') + KAFKA_FOLDER='Kafka'$KAFKA_FOLDER_VERSION + mkdir -p $KAFKA_FOLDER + mkdir -p /tmp/kafka + ln -s /home/ec2-user/$KAFKA_FOLDER /home/ec2-user/kafka + cd $KAFKA_FOLDER + APACHE_KAFKA_INSTALLER_LOCATION=${apache_kafka_installer_location} + wget $APACHE_KAFKA_INSTALLER_LOCATION + APACHE_KAFKA_INSTALLER_FILE=$(echo "$APACHE_KAFKA_INSTALLER_LOCATION" | awk -F "/" '{print $NF}') + tar -xzf $APACHE_KAFKA_INSTALLER_FILE --strip 1 + cd libs + wget https://github.com/aws/aws-msk-iam-auth/releases/download/v2.2.0/aws-msk-iam-auth-2.2.0-all.jar + cd ../bin + echo "security.protocol=SASL_SSL" > client.properties + echo "sasl.mechanism=AWS_MSK_IAM" >> client.properties + echo "sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;" >> client.properties + echo "sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler" >> client.properties + + # Install AWS SAM CLI + cd /home/ec2-user + mkdir -p awssam + cd awssam + wget https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip aws-sam-cli-linux-x86_64.zip -d sam-installation + sudo ./sam-installation/install + + # Create command files for creating Kafka Topic and Kafka Producer + cd /home/ec2-user + MSK_CLUSTER_ARN=${msk_cluster_arn} + KAFKA_TOPIC=${kafka_topic_for_lambda} + echo "#!/bin/bash" > kafka_topic_creator.sh + sudo chmod +x kafka_topic_creator.sh + echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh + AWS_REGION=${aws_region} + echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh + echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' 
--output text)" >> kafka_topic_creator.sh + echo "sleep 5" >> kafka_topic_creator.sh + echo "KAFKA_TOPIC=$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "/home/ec2-user/kafka/bin/kafka-topics.sh --create --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --command-config /home/ec2-user/kafka/bin/client.properties --replication-factor 3 --partitions 3 --topic \$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "echo \"export MSK_CLUSTER_ARN=\$MSK_CLUSTER_ARN\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export AWS_REGION=\$AWS_REGION\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export BOOTSTRAP_BROKERS_IAM=\$BOOTSTRAP_BROKERS_IAM\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export KAFKA_TOPIC=\$KAFKA_TOPIC\" >> .bash_profile" >> kafka_topic_creator.sh + echo "#!/bin/bash" > kafka_message_sender.sh + echo "source /home/ec2-user/.bash_profile" >> kafka_message_sender.sh + echo "/home/ec2-user/kafka/bin/kafka-console-producer.sh --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --producer.config /home/ec2-user/kafka/bin/client.properties --topic $KAFKA_TOPIC" >> kafka_message_sender.sh + sudo chmod +x kafka_message_sender.sh + CLUSTER_NAME="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f2)" + CLUSTER_ID="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f3)" + echo "export CLUSTER_NAME=$CLUSTER_NAME" >> /home/ec2-user/.bash_profile + echo "export CLUSTER_ID=$CLUSTER_ID" >> /home/ec2-user/.bash_profile + ./kafka_topic_creator.sh > kafka_topic_creator_output.txt + + #Checkout Serverless Patterns from Github + cd /home/ec2-user + SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location} + git clone $SERVERLESS_LAND_GITHUB_LOCATION + cd ./serverless-patterns/msk-lambda-iam-python-sam + cp template_original.yaml template.yaml + sudo chown -R ec2-user . 
+ source /home/ec2-user/.bash_profile + sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml + sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml + sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + sed -i "s/NODEJS_VERSION/$NODEJS_VERSION/g" template.yaml + + # Get IP CIDR range for EC2 Instance Connect + cd /home/ec2-user + mkdir -p ip_prefix + cd ip_prefix + git clone https://github.com/joetek/aws-ip-ranges-json.git + cd aws-ip-ranges-json + AWS_REGION=${aws_region} + EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') + echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile + SECURITY_GROUP=${security_group_id} + echo "export SECURITY_GROUP=$SECURITY_GROUP" >> /home/ec2-user/.bash_profile + aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP + + - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId + msk_cluster_arn : !GetAtt MSKCluster.Arn + kafka_topic_for_lambda : !Ref KafkaTopicForLambda + msk_kafka_version: !Ref MSKKafkaVersion + apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation + serverless_land_github_location: !Ref ServerlessLandGithubLocation + nodejs_version: !Ref NodejsVersion + aws_region: !Ref 'AWS::Region' + + KafkaClientEC2InstanceServerless: + Condition: CreateServerlessCluster + DependsOn: ServerlessMSKCluster + Type: AWS::EC2::Instance + Properties: + InstanceType: m5.large + IamInstanceProfile: !Ref EC2InstanceProfile + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + SubnetId: !Ref PublicSubnetOne + SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] + ImageId: !Ref LatestAmiId + Tags: + - Key: 'Name' + Value: 'KafkaClientInstance' + BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 50 + VolumeType: gp2 + DeleteOnTermination: true + 
+      UserData: 
+        Fn::Base64: 
+          !Sub
+          - |
+            #!/bin/bash
+            yum update -y
+            sudo yum install ec2-instance-connect -y
+            sudo yum install java-11-amazon-corretto-devel -y
+            yum install nmap-ncat -y
+            yum install git -y
+            yum install jq -y
+
+            #install node.js
+            NODEJS_VERSION=${nodejs_version}
+            if [ "$NODEJS_VERSION" == "nodejs18.x" ]; then
+              sudo dnf install -y nodejs
+            elif [ "$NODEJS_VERSION" == "nodejs20.x" ]; then
+              sudo dnf install -y nodejs20
+            elif [ "$NODEJS_VERSION" == "nodejs22.x" ]; then
+              sudo dnf install -y nodejs20
+            else
+              sudo dnf install -y nodejs
+            fi
+            echo "export NODEJS_VERSION=$NODEJS_VERSION" >> /home/ec2-user/.bash_profile
+
+            # install and start docker
+            sudo yum install -y docker
+            sudo service docker start
+            sudo usermod -a -G docker ec2-user
+
+            # install AWS CLI 2 - access with aws2
+            sudo yum remove awscli
+            cd /home/ec2-user
+            mkdir -p awscli
+            cd awscli
+            curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+            unzip awscliv2.zip
+            sudo ./aws/install
+
+            # Create dirs, get Apache Kafka and unpack it
+            cd /home/ec2-user
+            KAFKA_VERSION=${msk_kafka_version}
+            KAFKA_FOLDER_VERSION=$(echo "$KAFKA_VERSION" | tr -d '.')
+            KAFKA_FOLDER='Kafka'$KAFKA_FOLDER_VERSION
+            mkdir -p $KAFKA_FOLDER
+            mkdir -p /tmp/kafka
+            ln -s /home/ec2-user/$KAFKA_FOLDER /home/ec2-user/kafka
+            cd $KAFKA_FOLDER
+            APACHE_KAFKA_INSTALLER_LOCATION=${apache_kafka_installer_location}
+            wget $APACHE_KAFKA_INSTALLER_LOCATION
+            APACHE_KAFKA_INSTALLER_FILE=$(echo "$APACHE_KAFKA_INSTALLER_LOCATION" | awk -F "/" '{print $NF}')
+            tar -xzf $APACHE_KAFKA_INSTALLER_FILE --strip 1
+            cd libs
+            wget https://github.com/aws/aws-msk-iam-auth/releases/download/v2.2.0/aws-msk-iam-auth-2.2.0-all.jar
+            cd ../bin
+            echo "security.protocol=SASL_SSL" > client.properties
+            echo "sasl.mechanism=AWS_MSK_IAM" >> client.properties
+            echo "sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;" >> client.properties
+            echo 
"sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler" >> client.properties + + # Install AWS SAM CLI + cd /home/ec2-user + mkdir -p awssam + cd awssam + wget https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip aws-sam-cli-linux-x86_64.zip -d sam-installation + sudo ./sam-installation/install + + # Create command files for creating Kafka Topic and Kafka Producer + cd /home/ec2-user + MSK_CLUSTER_ARN=${msk_cluster_arn} + KAFKA_TOPIC=${kafka_topic_for_lambda} + echo "#!/bin/bash" > kafka_topic_creator.sh + sudo chmod +x kafka_topic_creator.sh + echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh + AWS_REGION=${aws_region} + echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh + echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh + echo "sleep 5" >> kafka_topic_creator.sh + echo "KAFKA_TOPIC=$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "/home/ec2-user/kafka/bin/kafka-topics.sh --create --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --command-config /home/ec2-user/kafka/bin/client.properties --replication-factor 3 --partitions 3 --topic \$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "echo \"export MSK_CLUSTER_ARN=\$MSK_CLUSTER_ARN\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export AWS_REGION=\$AWS_REGION\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export BOOTSTRAP_BROKERS_IAM=\$BOOTSTRAP_BROKERS_IAM\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export KAFKA_TOPIC=\$KAFKA_TOPIC\" >> .bash_profile" >> kafka_topic_creator.sh + echo "#!/bin/bash" > kafka_message_sender.sh + echo "source /home/ec2-user/.bash_profile" >> kafka_message_sender.sh + echo "/home/ec2-user/kafka/bin/kafka-console-producer.sh --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --producer.config 
/home/ec2-user/kafka/bin/client.properties --topic $KAFKA_TOPIC" >> kafka_message_sender.sh + sudo chmod +x kafka_message_sender.sh + CLUSTER_NAME="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f2)" + CLUSTER_ID="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f3)" + echo "export CLUSTER_NAME=$CLUSTER_NAME" >> /home/ec2-user/.bash_profile + echo "export CLUSTER_ID=$CLUSTER_ID" >> /home/ec2-user/.bash_profile + ./kafka_topic_creator.sh > kafka_topic_creator_output.txt + + #Checkout Serverless Patterns from Github + cd /home/ec2-user + SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location} + git clone $SERVERLESS_LAND_GITHUB_LOCATION + cd ./serverless-patterns/msk-lambda-iam-python-sam + cp template_original.yaml template.yaml + sudo chown -R ec2-user . + source /home/ec2-user/.bash_profile + sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml + sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml + sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + sed -i "s/NODEJS_VERSION/$NODEJS_VERSION/g" template.yaml + + # Get IP CIDR range for EC2 Instance Connect + cd /home/ec2-user + mkdir -p ip_prefix + cd ip_prefix + git clone https://github.com/joetek/aws-ip-ranges-json.git + cd aws-ip-ranges-json + AWS_REGION=${aws_region} + EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') + echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile + SECURITY_GROUP=${security_group_id} + echo "export SECURITY_GROUP=$SECURITY_GROUP" >> /home/ec2-user/.bash_profile + aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP + + - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId + msk_cluster_arn : !GetAtt ServerlessMSKCluster.Arn + kafka_topic_for_lambda : !Ref KafkaTopicForLambda + msk_kafka_version: !Ref MSKKafkaVersion + apache_kafka_installer_location: !Ref 
ApacheKafkaInstallerLocation + serverless_land_github_location: !Ref ServerlessLandGithubLocation + nodejs_version: !Ref NodejsVersion + aws_region: !Ref 'AWS::Region' + + EC2InstanceEndpoint: + Type: AWS::EC2::InstanceConnectEndpoint + Properties: + PreserveClientIp: true + SecurityGroupIds: + - !GetAtt KafkaClientInstanceSecurityGroup.GroupId + SubnetId: !Ref PublicSubnetOne + + EC2Role: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Sid: '' + Effect: Allow + Principal: + Service: ec2.amazonaws.com + Action: 'sts:AssumeRole' + Path: "/" + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonMSKFullAccess + - arn:aws:iam::aws:policy/AWSCloudFormationFullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAFullAccess + - arn:aws:iam::aws:policy/IAMFullAccess + - arn:aws:iam::aws:policy/AWSLambda_FullAccess + Policies: + - PolicyName: MSKConfigurationAccess + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "kafka:CreateConfiguration", + "Resource": "*" + } + ] + }' + - PolicyName: CloudformationDeploy + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:*" + ], + "Resource": "*" + } + ] + }' + - PolicyName: MSKProducerPermissions + PolicyDocument: + Version: 2012-10-17 + Statement: + - Sid: SecretsAccess + Effect: Allow + Action: + - 'secretsmanager:*' + - 'kms:*' + - 'glue:*Schema*' + - 'iam:CreatePolicy' + - 'iam:Tag*' + - 'iam:AttachRolePolicy' + Resource: '*' + - PolicyName: MSKConnectAuthentication + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic*", + "kafka-cluster:Connect", + "kafka-cluster:AlterCluster", + 
"kafka-cluster:DescribeCluster", + "kafka-cluster:DescribeClusterDynamicConfiguration" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:cluster/${AWS::StackName}-cluster/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic*", + "kafka-cluster:WriteData", + "kafka-cluster:ReadData" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:topic/${AWS::StackName}-cluster/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:AlterGroup", + "kafka-cluster:DescribeGroup" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:group/${AWS::StackName}-cluster/*" + ] + } + ] + }' + - PolicyName: SecurityGroupsPolicy + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroupRules", + "ec2:DescribeTags" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:RevokeSecurityGroupEgress", + "ec2:ModifySecurityGroupRules", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress" + ], + "Resource": [ + "arn:aws:ec2:${AWS::Region}:${AWS::AccountId}:security-group/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:ModifySecurityGroupRules" + ], + "Resource": [ + "arn:aws:ec2:${AWS::Region}:${AWS::AccountId}:security-group-rule/*" + ] + } + ] + }' + + EC2InstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + InstanceProfileName: !Join + - '-' + - - 'EC2MMMSKCFProfile' + - !Ref 'AWS::StackName' + Roles: + - !Ref EC2Role + + + MSKCertAuthority: + Type: AWS::ACMPCA::CertificateAuthority + Condition: CreateProvisionedCluster + Properties: + KeyAlgorithm: "RSA_4096" + SigningAlgorithm: "SHA256WITHRSA" + Subject: + Country: "US" + Type: "ROOT" + + MSKCert: + Type: AWS::ACMPCA::Certificate + Condition: 
CreateProvisionedCluster + Properties: + CertificateAuthorityArn: !Ref MSKCertAuthority + CertificateSigningRequest: !GetAtt + - MSKCertAuthority + - CertificateSigningRequest + SigningAlgorithm: "SHA256WITHRSA" + TemplateArn: arn:aws:acm-pca:::template/RootCACertificate/V1 + Validity: + Type: YEARS + Value: 10 + + RootCAActivation: + Type: AWS::ACMPCA::CertificateAuthorityActivation + Condition: CreateProvisionedCluster + Properties: + CertificateAuthorityArn: + Ref: MSKCertAuthority + Certificate: + Fn::GetAtt: + - MSKCert + - Certificate + Status: ACTIVE + + RootCAPermission: + Type: AWS::ACMPCA::Permission + Condition: CreateProvisionedCluster + Properties: + Actions: + - IssueCertificate + - GetCertificate + - ListPermissions + CertificateAuthorityArn: !Ref MSKCertAuthority + Principal: acm.amazonaws.com + + CredentialsKMSKey: + Type: AWS::KMS::Key + Condition: CreateProvisionedCluster + Properties: + Description: "KMS key to use with credentials secret with KMS" + EnableKeyRotation: True + KeyPolicy: + Version: "2012-10-17" + Id: key-default-1 + Statement: + - Sid: Enable IAM User Permissions + Effect: Allow + Principal: + AWS: !Join + - '' + - - 'arn:aws:iam::' + - !Ref 'AWS::AccountId' + - ':root' + Action: 'kms:*' + Resource: '*' + - Sid: Enable Secret Manager Permissions + Effect: Allow + Principal: + AWS: "*" + Action: + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: '*' + Condition: + StringEquals: + kms:CallerAccount: !Ref 'AWS::AccountId' + kms:ViaService: !Join + - '' + - - 'secretsmanager.' 
+ - !Ref 'AWS::Region' + - '.amazonaws.com' + PendingWindowInDays: 7 + + CredentialsKMSKeyAlias: + Type: AWS::KMS::Alias + Condition: CreateProvisionedCluster + Properties: + AliasName: alias/mskstack_secret_manager_key + TargetKeyId: !Ref 'CredentialsKMSKey' + + CredentialsSecret: + Type: AWS::SecretsManager::Secret + Condition: CreateProvisionedCluster + Properties: + Description: "Secret to use for SCRAM Auth" + Name: "AmazonMSK_Credentials" + GenerateSecretString: + SecretStringTemplate: '{"username": "test-user"}' + GenerateStringKey: "password" + PasswordLength: 30 + ExcludeCharacters: '"@/\' + KmsKeyId: !Ref 'CredentialsKMSKey' + + MSKConfiguration: + Type: AWS::MSK::Configuration + Condition: CreateProvisionedCluster + Properties: + Description: "MSKConfiguration" + Name: "MSKConfiguration" + ServerProperties: | + auto.create.topics.enable=true + default.replication.factor=3 + min.insync.replicas=2 + num.io.threads=8 + num.network.threads=5 + num.partitions=1 + num.replica.fetchers=2 + replica.lag.time.max.ms=30000 + socket.receive.buffer.bytes=102400 + socket.request.max.bytes=104857600 + socket.send.buffer.bytes=102400 + unclean.leader.election.enable=true + zookeeper.session.timeout.ms=18000 + delete.topic.enable=true + log.retention.hours=8 + + MSKCluster: + Type: AWS::MSK::Cluster + Condition: CreateProvisionedCluster + Properties: + BrokerNodeGroupInfo: + ClientSubnets: + - !Ref PrivateSubnetMSKOne + - !Ref PrivateSubnetMSKTwo + - !Ref PrivateSubnetMSKThree + SecurityGroups: + - !GetAtt MSKSecurityGroup.GroupId + InstanceType: "kafka.m5.large" + StorageInfo: + EBSStorageInfo: + VolumeSize: 100 + ClientAuthentication: + Unauthenticated: + Enabled: False + Sasl: + Iam: + Enabled: True + Scram: + Enabled: True + Tls: + CertificateAuthorityArnList: + - !Ref MSKCertAuthority + Enabled: True + ClusterName: !Sub "${AWS::StackName}-cluster" + ConfigurationInfo: + Arn: !Ref MSKConfiguration + Revision: 1 + EncryptionInfo: + EncryptionInTransit: + ClientBroker: 
TLS + InCluster: True + KafkaVersion: !Ref MSKKafkaVersion + NumberOfBrokerNodes: 3 + + SecretMSKAssociation: + Type: AWS::MSK::BatchScramSecret + Condition: CreateProvisionedCluster + Properties: + ClusterArn: !Ref MSKCluster + SecretArnList: + - !Ref CredentialsSecret + + ServerlessMSKCluster: + Type: AWS::MSK::ServerlessCluster + Condition: CreateServerlessCluster + Properties: + ClientAuthentication: + Sasl: + Iam: + Enabled: True + ClusterName: !Sub "${AWS::StackName}-cluster" + VpcConfigs: + - SubnetIds: + - !Ref PrivateSubnetMSKOne + - !Ref PrivateSubnetMSKTwo + - !Ref PrivateSubnetMSKThree + SecurityGroups: + - !GetAtt MSKSecurityGroup.GroupId + +Outputs: + VPCId: + Description: The ID of the VPC created + Value: !Ref 'VPC' + Export: + Name: !Sub "${AWS::StackName}-VPCID" + PublicSubnetOne: + Description: The name of the public subnet created + Value: !Ref 'PublicSubnetOne' + Export: + Name: !Sub "${AWS::StackName}-PublicSubnetOne" + PrivateSubnetMSKOne: + Description: The ID of private subnet one created + Value: !Ref 'PrivateSubnetMSKOne' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKOne" + PrivateSubnetMSKTwo: + Description: The ID of private subnet two created + Value: !Ref 'PrivateSubnetMSKTwo' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKTwo" + PrivateSubnetMSKThree: + Description: The ID of private subnet three created + Value: !Ref 'PrivateSubnetMSKThree' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKThree" + VPCStackName: + Description: The name of the VPC Stack + Value: !Ref 'AWS::StackName' + Export: + Name: !Sub "${AWS::StackName}-VPCStackName" + MSKArn: + Description: Provisioned MSK Cluster ARN. + Value: !Ref MSKCluster + Export: + Name: !Sub "${AWS::StackName}-MSKArn" + Condition: "CreateProvisionedCluster" + CredentialsSecretArn: + Description: ARN for secret manager secret with credentials. 
+    Value: !Ref CredentialsSecret
+    Export:
+      Name: !Sub "${AWS::StackName}-CredentialsSecret"
+    Condition: "CreateProvisionedCluster"
+  ServerlessMSKArn:
+    Description: Serverless MSK Cluster ARN.
+    Value: !Ref ServerlessMSKCluster
+    Export:
+      Name: !Sub "${AWS::StackName}-Serverless"
+    Condition: "CreateServerlessCluster"
+  SecurityGroupId:
+    Description: ID of security group for MSK clients.
+    Value: !GetAtt MSKSecurityGroup.GroupId
+    Export:
+      Name: !Sub "${AWS::StackName}-SecurityGroupId"
+  EC2InstanceEndpointID:
+    Description: The ID of the EC2 Instance Endpoint
+    Value: !Ref EC2InstanceEndpoint
+  KafkaTopicForLambda:
+    Description: The Topic to use for the Python Lambda Function
+    Value: !Ref KafkaTopicForLambda
+    Export:
+      Name: !Sub "${AWS::StackName}-KafkaTopicForLambda"
+
\ No newline at end of file
diff --git a/msk-lambda-iam-node-sam/template.yaml b/msk-lambda-iam-node-sam/template_original.yaml
similarity index 96%
rename from msk-lambda-iam-node-sam/template.yaml
rename to msk-lambda-iam-node-sam/template_original.yaml
index 62d4df0da..fb80278cf 100644
--- a/msk-lambda-iam-node-sam/template.yaml
+++ b/msk-lambda-iam-node-sam/template_original.yaml
@@ -17,7 +17,7 @@ Resources:
     Properties:
       CodeUri: HandlerKafka/
       Handler: app.handler
-      Runtime: nodejs18.x
+      Runtime: NODEJS_VERSION
       Architectures:
         - x86_64
       Events:
@@ -71,12 +71,15 @@ Parameters:
   MSKClusterName:
     Type: String
     Description: Enter the name of the MSK Cluster
+    Default: CLUSTER_NAME
   MSKClusterId:
     Type: String
     Description: Enter the ID of the MSK Cluster
+    Default: CLUSTER_ID
   MSKTopic:
     Type: String
     Description: Enter the name of the MSK Topic
+    Default: KAFKA_TOPIC
 Outputs:
   LambdaMSKConsumerNodeJSFunction:
     Description: "Topic Consumer Lambda Function ARN"
From c9515f70f4cda36dd5568503780fb67cb12a12b5 Mon Sep 17 00:00:00 2001
From: indranilbanerjeeawssa
Date: Mon, 9 Dec 2024 00:10:47 -0800
Subject: [PATCH 07/11] Fixed Node.js MSK Lambda CFT and Readme.md

---
 .../MSKAndKafkaClientEC2.yaml | 57 ++++--
 msk-lambda-iam-node-sam/README.md | 190 ++++++++++++------
 2 files changed, 165 insertions(+), 82 deletions(-)

diff --git a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml
index 5e00cb1d4..5492a654e 100644
--- a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml
+++ b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml
@@ -27,7 +27,7 @@ Parameters:
     Default: https://archive.apache.org/dist/kafka/3.5.1/kafka_2.13-3.5.1.tgz
   KafkaTopicForLambda:
     Type: String
-    Default: MskIamPythonLambdaTopic
+    Default: MskIamNodejsLambdaTopic
   ServerlessLandGithubLocation:
     Type: String
     Default: https://github.com/aws-samples/serverless-patterns/
@@ -305,19 +305,26 @@ Resources:
             yum install jq -y
 
             #install node.js
+            echo "#!/bin/bash" > nodejs_installer.sh
+            echo "touch ~/.bashrc" >> nodejs_installer.sh
+            echo "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash" >> nodejs_installer.sh
+            echo "source ~/.bashrc" >> nodejs_installer.sh
             NODEJS_VERSION=${nodejs_version}
-            if ["$NODEJS_VERSION" == "nodejs18.x"]
-            sudo dnf install nodejs
-            elif ["$NODEJS_VERSION" == "nodejs20.x"]
-            sudo dnf install nodejs20
-            elif ["$NODEJS_VERSION" == "nodejs22.x"]
-            sudo dnf install nodejs20
+            if [ "$NODEJS_VERSION" == "nodejs18.x" ]; then
+              echo "nvm install 18" >> nodejs_installer.sh
+            elif [ "$NODEJS_VERSION" == "nodejs20.x" ]; then
+              echo "nvm install 20" >> nodejs_installer.sh
+            elif [ "$NODEJS_VERSION" == "nodejs22.x" ]; then
+              echo "nvm install 22" >> nodejs_installer.sh
             else
-            sudo dnf install nodejs
+              echo "nvm install 18" >> nodejs_installer.sh
             fi
-            echo "export NODEJS_VERSION=$NODEJS_VERSION" >> /home/ec2-user/.bash_profile
-
-
+            echo "source ~/.bashrc" >> nodejs_installer.sh
+            sudo chmod +x nodejs_installer.sh
+            cp ./nodejs_installer.sh /home/ec2-user/nodejs_installer.sh
+            cd /home/ec2-user
+            sudo -u ec2-user ./nodejs_installer.sh > nodejs_installer_output.txt
+
             # install and start docker
             sudo yum install -y docker
             sudo 
service docker start
@@ -392,7 +399,7 @@ Resources:
             cd /home/ec2-user
             SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location}
             git clone $SERVERLESS_LAND_GITHUB_LOCATION
-            cd ./serverless-patterns/msk-lambda-iam-python-sam
+            cd ./serverless-patterns/msk-lambda-iam-node-sam
             cp template_original.yaml template.yaml
             sudo chown -R ec2-user .
             source /home/ec2-user/.bash_profile
@@ -459,17 +466,25 @@ Resources:
             yum install jq -y
 
             #install node.js
+            echo "#!/bin/bash" > nodejs_installer.sh
+            echo "touch ~/.bashrc" >> nodejs_installer.sh
+            echo "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash" >> nodejs_installer.sh
+            echo "source ~/.bashrc" >> nodejs_installer.sh
             NODEJS_VERSION=${nodejs_version}
-            if ["$NODEJS_VERSION" == "nodejs18.x"]
-            sudo dnf install nodejs
-            elif ["$NODEJS_VERSION" == "nodejs20.x"]
-            sudo dnf install nodejs20
-            elif ["$NODEJS_VERSION" == "nodejs22.x"]
-            sudo dnf install nodejs20
+            if [ "$NODEJS_VERSION" == "nodejs18.x" ]; then
+              echo "nvm install 18" >> nodejs_installer.sh
+            elif [ "$NODEJS_VERSION" == "nodejs20.x" ]; then
+              echo "nvm install 20" >> nodejs_installer.sh
+            elif [ "$NODEJS_VERSION" == "nodejs22.x" ]; then
+              echo "nvm install 22" >> nodejs_installer.sh
             else
-            sudo dnf install nodejs
+              echo "nvm install 18" >> nodejs_installer.sh
             fi
-            echo "export NODEJS_VERSION=$NODEJS_VERSION" >> /home/ec2-user/.bash_profile
+            echo "source ~/.bashrc" >> nodejs_installer.sh
+            sudo chmod +x nodejs_installer.sh
+            cp ./nodejs_installer.sh /home/ec2-user/nodejs_installer.sh
+            cd /home/ec2-user
+            sudo -u ec2-user ./nodejs_installer.sh > nodejs_installer_output.txt
 
             # install and start docker
             sudo yum install -y docker
@@ -545,7 +560,7 @@ Resources:
             cd /home/ec2-user
             SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location}
             git clone $SERVERLESS_LAND_GITHUB_LOCATION
-            cd ./serverless-patterns/msk-lambda-iam-python-sam
+            cd ./serverless-patterns/msk-lambda-iam-node-sam
             cp template_original.yaml template.yaml
             sudo 
chown -R ec2-user . source /home/ec2-user/.bash_profile diff --git a/msk-lambda-iam-node-sam/README.md b/msk-lambda-iam-node-sam/README.md index 4551eab6c..62465ed68 100644 --- a/msk-lambda-iam-node-sam/README.md +++ b/msk-lambda-iam-node-sam/README.md @@ -1,55 +1,128 @@ -# Node.js Lambda Kafka Consumer with IAM Auth, using SAM +# Node.js AWS Lambda Kafka consumer with IAM auth, using AWS SAM -This pattern is an example of a Lambda function that consumes messages from an Amazon Managed Streaming for Kafka (Amazon MSK) topic, where the MSK Cluster has been configured to use IAM authentication. This pattern assumes you already have an MSK cluster with a topic configured, if you need a sample pattern to deploy an MSK cluster either in Provisioned or Serverless modes please see the [msk-cfn-sasl-lambda pattern](https://serverlessland.com/patterns/msk-cfn-sasl-lambda). +This pattern is an example of a Lambda function that consumes messages from an Amazon Managed Streaming for Kafka (Amazon MSK) topic, where the MSK Cluster has been configured to use IAM authentication. This project contains source code and supporting files for a serverless application that you can deploy with the AWS Serverless Application Model (AWS SAM) CLI. It includes the following files and folders. - HandlerKafka - Code for the application's Lambda function. - events - Invocation events that you can use to invoke the function. -- template.yaml - An AWS SAM template that defines the application's AWS resources. +- template_original.yaml - An AWS SAM template that defines the application's AWS resources. +- MSKAndKafkaClientEC2.yaml - A Cloudformation template file that can be used to deploy an MSK cluster and also deploy an EC2 machine with all pre-requisities already installed, so you can directly build and deploy the lambda function and test it out. -The application creates a Lambda function that listens to Kafka messages on a topic of an MSK Cluster. 
These resources are defined in the `template.yaml` file in this project. You can update the template to add AWS resources through the same deployment process that updates your application code. +The application creates a Lambda function that listens to Kafka messages on a topic of an MSK Cluster. These resources are defined in the `template_original.yaml` file in this project. You can update the template to add AWS resources through the same deployment process that updates your application code. Important: this application uses various AWS services and there are costs associated with these services after the Free Tier usage - please see the [AWS Pricing page](https://aws.amazon.com/pricing/) for details. You are responsible for any AWS costs incurred. No warranty is implied in this example. ## Requirements * [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in. The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. -* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) installed and configured -* [Git installed](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) -* [AWS Serverless Application Model](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) (AWS SAM) installed -* Create MSK cluster and topic that will be used for testing. It is important to create the topic before deploying the Lambda function, otherwise the event source mapping will stay disabled. -## Deploy the sample application +## Run the Cloudformation template to create the MSK Cluster and Client EC2 machine -The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. 
It can also emulate your application's build environment and API. +* [Run the Cloudformation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS Cloudformation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the Cloudformation stack to be created. This Cloudformation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 machine that you can use as a client. + +* [Connect to the EC2 machine] - Once the Cloudformation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. +Note: You may need to wait for some time after the Cloudformation stack is created, as some UserData scripts continue running after the Cloudformation stack shows Created. + +* [Check if Kafka Topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamNodejsLambdaTopic." + +If you are not able to find the file kafka_topic_creator_output.txt or if it is blank or you see an error message, then you need to run the file ./kafka_topic_creator.sh. This file runs a script that goes and creates the Kafka topic that the Lambda function will subscribe to. + +## Pre-requisites to Deploy the sample Lambda function -To use the AWS SAM CLI, you need the following tools. +The EC2 machine that was created by running the Cloudformation template has all the software that will be needed to deploy the Lambda function. 
-* AWS SAM CLI - [Install the AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) -* Docker - [Install Docker community edition](https://hub.docker.com/search/?type=edition&offering=community) +The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. + +* Node.js - We have installed the version of Node.js that you picked up at the time of specifying the input parameters to the Cloudformation template +* AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) -1. Create a new directory, navigate to that directory in a terminal and clone the GitHub repository: +We have also cloned the Github repository for serverless-patterns on the EC2 machine already by running the below command ``` git clone https://github.com/aws-samples/serverless-patterns.git ``` -1. Change directory to the pattern directory: +Change directory to the pattern directory: ``` - cd msk-lambda-iam-node-sam + cd serverless-patterns/msk-lambda-iam-node-sam ``` +## Use the SAM CLI to build and test locally -1. From the command line, use AWS SAM to deploy the AWS resources for the pattern as specified in the template.yml file: - ``` - sam build - sam deploy --guided - ``` +Build your application with the `sam build` command. + +```bash +sam build +``` + +The SAM CLI creates a deployment package, and saves it in the `.aws-sam/build` folder. + +Test a single function by invoking it directly with a test event. 
An event is a JSON document that represents the input that the function receives from the event source. Test events are included in the `events` folder in this project. + +Run functions locally and invoke them with the `sam local invoke` command. + +```bash +sam local invoke --event events/event.json +``` + +You should see a response such as the below: +``` +***** Begin sam local invoke response ***** + +START RequestId: b516e210-3534-443b-bacb-38a16ef5b76c Version: $LATEST +2024-12-09T07:43:02.057Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Key: myTopic-0 +2024-12-09T07:43:02.058Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Record: { + topic: 'myTopic', + partition: 0, + offset: 250, + timestamp: 1678072110111, + timestampType: 'CREATE_TIME', + value: 'Zg==', + headers: [] +} +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Topic: myTopic +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Partition: 0 +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Offset: 250 +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Timestamp: 1678072110111 +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO TimestampType: CREATE_TIME +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Key: null +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Value: f +2024-12-09T07:43:02.060Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Record: { + topic: 'myTopic', + partition: 0, + offset: 251, + timestamp: 1678072111086, + timestampType: 'CREATE_TIME', + value: 'Zw==', + headers: [] +} +2024-12-09T07:43:02.061Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Topic: myTopic +2024-12-09T07:43:02.061Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Partition: 0 +2024-12-09T07:43:02.061Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Offset: 251 +2024-12-09T07:43:02.062Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Timestamp: 1678072111086 +2024-12-09T07:43:02.062Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO 
TimestampType: CREATE_TIME +2024-12-09T07:43:02.062Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Key: null +2024-12-09T07:43:02.062Z 05ddf736-39f9-4956-a8b1-d651883cdcba INFO Value: g +END RequestId: 05ddf736-39f9-4956-a8b1-d651883cdcba +REPORT RequestId: 05ddf736-39f9-4956-a8b1-d651883cdcba Init Duration: 0.10 ms Duration: 112.94 ms Billed Duration: 113 ms Memory Size: 128 MB Max Memory Used: 128 MB + +***** End sam local invoke response ***** +``` + +## Deploy the sample application + + +To deploy your application for the first time, run the following in your shell: + +```bash +sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-node-sam --guided +``` + +The sam deploy command will package and deploy your application to AWS, with a series of prompts. You can accept all the defaults by hitting Enter: -1. During the prompts: * **Stack Name**: The name of the stack to deploy to CloudFormation. This should be unique to your account and region, and a good starting point would be something matching your project name. * **AWS Region**: The AWS region you want to deploy your app to. -* **Parameter MSKClusterName**: The name of the MSKCluster, eg. msk-test-cluster - -* **Parameter MSKClusterId**: The unique ID of the MSKCluster, eg. a4e132c8-6ad0-4334-a313-123456789012-s2 +* **Parameter MSKClusterName**: The name of the MSKCluster +* **Parameter MSKClusterId**: The unique ID of the MSKCluster * **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on * **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. * **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. 
By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. @@ -58,7 +131,7 @@ To use the AWS SAM CLI, you need the following tools. * **SAM configuration file [samconfig.toml]**: Name of the configuration file to store configuration information locally * **SAM configuration environment [default]**: Environment for storing deployment information locally -You should get a message "Successfully created/updated stack - in " if all goes well. +You should get a message "Successfully created/updated stack - in " if all goes well Once you have run `sam deploy --guided` mode once and saved arguments to a configuration file (samconfig.toml), you can use `sam deploy` in future to use these defaults. @@ -68,8 +141,28 @@ This pattern creates a Lambda function along with a Lambda Event Source Mapping( ## Test the sample application -Once the Lambda function is deployed, send some Kafka messages to the topic that you configured in the Lambda function trigger. +Once the lambda function is deployed, send some Kafka messages on the topic that the lambda function is listening on, on the MSK server. + +For your convenience, a script has been created on the EC2 machine that was provisioned using Cloudformation. + +cd /home/ec2-user + +You should see a script called kafka_message_sender.sh. 
Run that script and you should be able to send a new Kafka message in every line as shown below +``` +[ec2-user@ip-10-0-0-126 ~]$ sh kafka_message_sender.sh +>My first message +>My second message +>My third message +>My fourth message +>My fifth message +>My sixth message +>My seventh message +>My eigth message +>My ninth message +>My tenth message +>Ctrl-C +``` Either send at least 10 messages or wait for 300 seconds (check the values of BatchSize: 10 and MaximumBatchingWindowInSeconds: 300 in the template.yaml file) Then check Amazon CloudWatch logs and you should see messages in the CloudWatch Log Group with the name of the deployed Lambda function. @@ -84,44 +177,19 @@ The `Key` and `Value` are base64 encoded and have to be decoded. A message can a The code in this example prints out the fields in the Kafka message and also decrypts the key and the value and logs them to CloudWatch logs. +## Cleanup + +You can first clean-up the Lambda function by running the sam delete command -### Local development - -**You can invoke the function locally using `sam local`** - -```bash -sam local invoke --event=events/event.json ``` +cd /home/ec2-user/serverless-patterns/msk-lambda-iam-node-sam +sam delete -You should see a response similar to the below - -START RequestId: 2d1041e7-fb49-4181-a8ac-15277f5d2b6c Version: $LATEST -2023-03-31T22:29:21.659Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Key: myTopic-0 -2023-03-31T22:29:21.699Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO R} headers: []=', 'CREATE_TIME', -2023-03-31T22:29:21.701Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Topic: myTopic -2023-03-31T22:29:21.701Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Partition: 0 -2023-03-31T22:29:21.701Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Offset: 250 -2023-03-31T22:29:21.701Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Timestamp: 1678072110111 -2023-03-31T22:29:21.702Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO TimestampType: CREATE_TIME -2023-03-31T22:29:21.702Z 
2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Key: null -2023-03-31T22:29:21.705Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Value: f -2023-03-31T22:29:21.710Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO R} headers: []=', 'CREATE_TIME', -2023-03-31T22:29:21.712Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Topic: myTopic -2023-03-31T22:29:21.713Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Partition: 0 -2023-03-31T22:29:21.719Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Offset: 251 -2023-03-31T22:29:21.725Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Timestamp: 1678072111086 -2023-03-31T22:29:21.725Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO TimestampType: CREATE_TIME -2023-03-31T22:29:21.725Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Key: null -2023-03-31T22:29:21.726Z 2d1041e7-fb49-4181-a8ac-15277f5d2b6c INFO Value: g -END RequestId: 2d1041e7-fb49-4181-a8ac-15277f5d2b6c -REPORT RequestId: 2d1041e7-fb49-4181-a8ac-15277f5d2b6c Init Duration: 11.37 msDuration: 2696.60 ms Billed Duration: 2697 ms Memory Size: 128 MB Max Memory Used: 128 MB +``` +confirm by pressing y for both the questions +You should see the lambda function getting deleted and a final confirmation "Deleted successfully" on the command-line -## Cleanup - -1. Delete the stack - ```bash - sam delete - ``` +Next you need to delete the Cloudformation template that created the MSK Server and the EC2 machine by going to the Cloudformation console and selecting the stack and then hitting the "Delete" button. It will run for sometime but eventually you should see the stack getting cleaned up. ---- Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
From db32997462d0ab4af4d25b494236252397a0f9b9 Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Mon, 9 Dec 2024 13:09:31 -0800 Subject: [PATCH 08/11] Fixed CFT for Node.js Installation block --- msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml index 5492a654e..ba63651f8 100644 --- a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml @@ -310,11 +310,12 @@ Resources: echo "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash" >> nodejs_installer.sh echo "source ~/.bashrc" >> nodejs_installer.sh NODEJS_VERSION=${nodejs_version} - if ["$NODEJS_VERSION" == "nodejs18.x"]; then + echo "NODEJS_VERSION=$NODEJS_VERSION" >> /home/ec2-user/bash_profile + if [ "$NODEJS_VERSION" == "nodejs18.x" ]; then echo "nvm install 18" >> nodejs_installer.sh - elif ["$NODEJS_VERSION" == "nodejs20.x"]; then + elif [ "$NODEJS_VERSION" == "nodejs20.x" ]; then echo "nvm install 20" >> nodejs_installer.sh - elif ["$NODEJS_VERSION" == "nodejs22.x"]; then + elif [ "$NODEJS_VERSION" == "nodejs22.x" ]; then echo "nvm install 22" >> nodejs_installer.sh else echo "nvm install 18" >> nodejs_installer.sh @@ -471,11 +472,12 @@ Resources: echo "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash" >> nodejs_installer.sh echo "source ~/.bashrc" >> nodejs_installer.sh NODEJS_VERSION=${nodejs_version} - if ["$NODEJS_VERSION" == "nodejs18.x"]; then + echo "NODEJS_VERSION=$NODEJS_VERSION" >> /home/ec2-user/bash_profile + if [ "$NODEJS_VERSION" == "nodejs18.x" ]; then echo "nvm install 18" >> nodejs_installer.sh - elif ["$NODEJS_VERSION" == "nodejs20.x"]; then + elif [ "$NODEJS_VERSION" == "nodejs20.x" ]; then echo "nvm install 20" >> nodejs_installer.sh - elif ["$NODEJS_VERSION" == "nodejs22.x"]; then + elif [ 
"$NODEJS_VERSION" == "nodejs22.x" ]; then echo "nvm install 22" >> nodejs_installer.sh else echo "nvm install 18" >> nodejs_installer.sh From 44373ea0775d9e166e442319e57a4daf0a614d71 Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Fri, 3 Jan 2025 16:00:27 -0800 Subject: [PATCH 09/11] Updated the MSKAndKafkaClientEC2.yaml files in msk-lambda-iam-node-sam and msk-lambda-iam-python-sam to include retry mechanism for all yum install commands --- .../MSKAndKafkaClientEC2.yaml | 291 +++++++++++++++-- .../MSKAndKafkaClientEC2.yaml | 300 ++++++++++++++++-- 2 files changed, 553 insertions(+), 38 deletions(-) diff --git a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml index ba63651f8..f2141b8bd 100644 --- a/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-node-sam/MSKAndKafkaClientEC2.yaml @@ -297,12 +297,92 @@ Resources: !Sub - | #!/bin/bash - yum update -y - sudo yum install ec2-instance-connect - sudo yum install java-11-amazon-corretto-devel -y - yum install nmap-ncat -y - yum install git -y - yum install jq -y + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum update -y + sudo yum install ec2-instance-connect + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of ec2-instance-connect succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install java-11-amazon-corretto-devel -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of java-11-amazon-corretto-devel succeeded" + success=true + else + echo "Attempt $attempt_num failed. 
Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install nmap-ncat -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of nmap-ncat succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install git -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of git succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install jq -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of jq succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done #install node.js echo "#!/bin/bash" > nodejs_installer.sh @@ -324,15 +404,63 @@ Resources: sudo chmod +x nodejs_installer.sh cp ./nodejs_installer.sh /home/ec2-user/nodejs_installer.sh cd cd /home/ec2-user - sudo -u ec2-user ./nodejs_installer.sh > nodejs_installer_output.txt + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo -u ec2-user ./nodejs_installer.sh > nodejs_installer_output.txt + # Check the exit code of the command + if [ $? 
-eq 0 ]; then + echo "Install of nodejs succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done # install and start docker - sudo yum install -y docker + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y docker + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Install of docker succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done sudo service docker start sudo usermod -a -G docker ec2-user # install AWS CLI 2 - access with aws2 - sudo yum remove awscli + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum remove awscli + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Removal of awscli succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + cd /home/ec2-user mkdir -p awscli cd awscli @@ -459,12 +587,91 @@ Resources: !Sub - | #!/bin/bash - yum update -y - sudo yum install ec2-instance-connect - sudo yum install java-11-amazon-corretto-devel -y - yum install nmap-ncat -y - yum install git -y - yum install jq -y + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum update -y + sudo yum install ec2-instance-connect + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of ec2-instance-connect succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install java-11-amazon-corretto-devel -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of java-11-amazon-corretto-devel succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install nmap-ncat -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of nmap-ncat succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install git -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of git succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install jq -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of jq succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done #install node.js echo "#!/bin/bash" > nodejs_installer.sh @@ -486,15 +693,63 @@ Resources: sudo chmod +x nodejs_installer.sh cp ./nodejs_installer.sh /home/ec2-user/nodejs_installer.sh cd cd /home/ec2-user - sudo -u ec2-user ./nodejs_installer.sh > nodejs_installer_output.txt + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo -u ec2-user ./nodejs_installer.sh > nodejs_installer_output.txt + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Install of nodejs succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + # install and start docker - sudo yum install -y docker + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y docker + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Install of docker succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done sudo service docker start sudo usermod -a -G docker ec2-user # install AWS CLI 2 - access with aws2 - sudo yum remove awscli + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum remove awscli + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Removal of awscli succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + cd /home/ec2-user mkdir -p awscli cd awscli diff --git a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml index cf056881e..19aca3c14 100644 --- a/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml +++ b/msk-lambda-iam-python-sam/MSKAndKafkaClientEC2.yaml @@ -13,10 +13,9 @@ Parameters: Default: '/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-6.1-x86_64' Python3Version: Type: String - Description: Choose the version of Python 3 between 3.9 and 3.12. Note that in Amazon Linux 2023, 3.9 is installed by default and maximum allowed version is 3.12 + Description: Choose the version of Python 3 between 3.9 and 3.12. Note that in Amazon Linux 2023, 3.9 is installed by default and maximum allowed version is 3.12. Also Python 3.10 is not available to install on Amazon Linux 2023 so it is not being offered as an option AllowedValues: - python3.9 - - python3.10 - python3.11 - python3.12 Default: python3.12 @@ -298,25 +297,155 @@ Resources: !Sub - | #!/bin/bash - yum update -y - sudo yum install ec2-instance-connect - sudo yum install java-11-amazon-corretto-devel -y - yum install nmap-ncat -y - yum install git -y - yum install jq -y + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum update -y + sudo yum install ec2-instance-connect + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of ec2-instance-connect succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install java-11-amazon-corretto-devel -y + # Check the exit code of the command + if [ $? 
-eq 0 ]; then + echo "Yum install of java-11-amazon-corretto-devel succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install nmap-ncat -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of nmap-ncat succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install git -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of git succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install jq -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of jq succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done #install latest python3 PYTHON3_VERSION=${python3_version} - sudo yum install $PYTHON3_VERSION -y + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install $PYTHON3_VERSION -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of $PYTHON3_VERSION succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + echo "export PYTHON3_VERSION=$PYTHON3_VERSION" >> /home/ec2-user/.bash_profile # install and start docker - sudo yum install -y docker + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y docker + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of docker succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + sudo service docker start sudo usermod -a -G docker ec2-user # install AWS CLI 2 - access with aws2 - sudo yum remove awscli + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum remove awscli + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum remove of awscli succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + cd /home/ec2-user mkdir -p awscli cd awscli @@ -443,25 +572,155 @@ Resources: !Sub - | #!/bin/bash - yum update -y - sudo yum install ec2-instance-connect - sudo yum install java-11-amazon-corretto-devel -y - yum install nmap-ncat -y - yum install git -y - yum install jq -y + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum update -y + sudo yum install ec2-instance-connect + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of ec2-instance-connect succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install java-11-amazon-corretto-devel -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of java-11-amazon-corretto-devel succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install nmap-ncat -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of nmap-ncat succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install git -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of git succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install jq -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of jq succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done #install latest python3 PYTHON3_VERSION=${python3_version} - sudo yum install $PYTHON3_VERSION -y + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install $PYTHON3_VERSION -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of $PYTHON3_VERSION succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + echo "export PYTHON3_VERSION=$PYTHON3_VERSION" >> /home/ec2-user/.bash_profile # install and start docker - sudo yum install -y docker + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y docker + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of docker succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + sudo service docker start sudo usermod -a -G docker ec2-user # install AWS CLI 2 - access with aws2 - sudo yum remove awscli + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum remove awscli + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum remove of awscli succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + cd /home/ec2-user mkdir -p awscli cd awscli @@ -469,6 +728,7 @@ Resources: unzip awscliv2.zip sudo ./aws/install + # Create dirs, get Apache Kafka and unpack it cd /home/ec2-user KAFKA_VERSION=${msk_kafka_version} From 51ae34bfced46e8ab7c3028a50b0055388b9425c Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Fri, 3 Jan 2025 17:43:06 -0800 Subject: [PATCH 10/11] Updated the README.md files for msk-lambda-iam-node-sam and msk-lambda-iam-python-sam based on review feedback from Ben Freiberg --- msk-lambda-iam-node-sam/README.md | 42 ++++++++++++++--------------- msk-lambda-iam-python-sam/README.md | 42 ++++++++++++++--------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/msk-lambda-iam-node-sam/README.md b/msk-lambda-iam-node-sam/README.md index 62465ed68..b295ebdda 100644 --- a/msk-lambda-iam-node-sam/README.md +++ b/msk-lambda-iam-node-sam/README.md @@ -7,7 +7,7 @@ This project contains source code and supporting files for a serverless applicat - HandlerKafka - Code for the application's Lambda function. - events - Invocation events that you can use to invoke the function. - template_original.yaml - An AWS SAM template that defines the application's AWS resources. -- MSKAndKafkaClientEC2.yaml - A Cloudformation template file that can be used to deploy an MSK cluster and also deploy an EC2 machine with all pre-requisities already installed, so you can directly build and deploy the lambda function and test it out. +- MSKAndKafkaClientEC2.yaml - An AWS CloudFormation template file that can be used to deploy an MSK cluster and also deploy an Amazon EC2 instance with all prerequisites already installed, so you can directly build and deploy the Lambda function and test it out. The application creates a Lambda function that listens to Kafka messages on a topic of an MSK Cluster. These resources are defined in the `template_original.yaml` file in this project. 
You can update the template to add AWS resources through the same deployment process that updates your application code. @@ -17,34 +17,34 @@ Important: this application uses various AWS services and there are costs associ * [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in. The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. -## Run the Cloudformation template to create the MSK Cluster and Client EC2 machine +## Deploy the CloudFormation template to create the MSK Cluster and Client EC2 instance -* [Run the Cloudformation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS Cloudformation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the Cloudformation stack to be created. This Cloudformation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 machine that you can use as a client. +* [Deploy the CloudFormation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS CloudFormation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the CloudFormation stack to be created. This CloudFormation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 instance that you can use as a client. -* [Connect to the EC2 machine] - Once the Cloudformation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. 
-Note: You may need to wait for some time after the Cloudformation stack is created, as some UserData scripts continue running after the Cloudformation stack shows Created. +* [Connect to the EC2 instance] - Once the CloudFormation stack is created, you can go to the EC2 console and log into the instance using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. +Note: You may need to wait for some time after the CloudFormation stack is created, as some UserData scripts continue running after the CloudFormation stack shows Created. -* [Check if Kafka Topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamNodejsLambdaTopic." +* [Check if Kafka Topic has been created] - Once you are logged into the EC2 instance, you should be in the `/home/ec2-user` folder. Check to see the contents of the file `kafka_topic_creator_output.txt` by running the command `cat kafka_topic_creator_output.txt`. You should see an output such as "Created topic MskIamNodejsLambdaTopic." -If you are not able to find the file kafka_topic_creator_output.txt or if it is blank or you see an error message, then you need to run the file ./kafka_topic_creator.sh. This file runs a script that goes and creates the Kafka topic that the Lambda function will subscribe to. +If you are not able to find the file `kafka_topic_creator_output.txt` or if it is blank or you see an error message, then you need to run `./kafka_topic_creator.sh`. This runs a script that creates the Kafka topic that the Lambda function will subscribe to. 
## Pre-requisites to Deploy the sample Lambda function -The EC2 machine that was created by running the Cloudformation template has all the software that will be needed to deploy the Lambda function. +The EC2 instance that was created by running the CloudFormation template has all the software that will be needed to deploy the Lambda function. The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. -* Node.js - We have installed the version of Node.js that you picked up at the time of specifying the input parameters to the Cloudformation template +* Node.js - We have installed the version of Node.js that you picked up at the time of specifying the input parameters to the CloudFormation template * AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) -* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) +* Docker - We have installed the Docker Community Edition on the EC2 instance (https://hub.docker.com/search/?type=edition&offering=community) -We have also cloned the Github repository for serverless-patterns on the EC2 machine already by running the below command +We have also cloned the Github repository for serverless-patterns on the EC2 instance already by running the below command ``` git clone https://github.com/aws-samples/serverless-patterns.git ``` Change directory to the pattern directory: ``` - cd serverless-patterns/msk-lambda-iam-node-sam + cd serverless-patterns/msk-lambda-iam-node-sam ``` ## Use the SAM CLI to build and test locally @@ -114,7 +114,7 @@ REPORT RequestId: 05ddf736-39f9-4956-a8b1-d651883cdcba Init Duration: 0.10 ms To deploy your 
application for the first time, run the following in your shell: ```bash -sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-node-sam --guided +sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-node-sam --guided ``` The sam deploy command will package and deploy your application to AWS, with a series of prompts. You can accept all the defaults by hitting Enter: @@ -123,7 +123,7 @@ The sam deploy command will package and deploy your application to AWS, with a s * **AWS Region**: The AWS region you want to deploy your app to. * **Parameter MSKClusterName**: The name of the MSKCluster * **Parameter MSKClusterId**: The unique ID of the MSKCluster -* **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on +* **Parameter MSKTopic**: The Kafka topic on which the Lambda function will listen on * **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. * **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. 
* **Disable rollback**: Defaults to No and it preserves the state of previously provisioned resources when an operation fails @@ -141,11 +141,11 @@ This pattern creates a Lambda function along with a Lambda Event Source Mapping( ## Test the sample application -Once the lambda function is deployed, send some Kafka messages on the topic that the lambda function is listening on, on the MSK server. +Once the Lambda function is deployed, send some Kafka messages to the topic that the Lambda function is listening on, on the MSK server. -For your convenience, a script has been created on the EC2 machine that was provisioned using Cloudformation. +For your convenience, a script has been created on the EC2 instance that was provisioned using CloudFormation. -cd /home/ec2-user +`cd /home/ec2-user` You should see a script called kafka_message_sender.sh. Run that script and you should be able to send a new Kafka message in every line as shown below @@ -179,17 +179,17 @@ The code in this example prints out the fields in the Kafka message and also dec ## Cleanup -You can first clean-up the Lambda function by running the sam delete command +You can first clean-up the Lambda function by running the `sam delete` command ``` -cd /home/ec2-user/serverless-patterns/msk-lambda-iam-node-sam +cd /home/ec2-user/serverless-patterns/msk-lambda-iam-node-sam sam delete ``` confirm by pressing y for both the questions -You should see the lambda function getting deleted and a final confirmation "Deleted successfully" on the command-line +You should see the Lambda function getting deleted and a final confirmation "Deleted successfully" on the command line -Next you need to delete the Cloudformation template that created the MSK Server and the EC2 machine by going to the Cloudformation console and selecting the stack and then hitting the "Delete" button. It will run for sometime but eventually you should see the stack getting cleaned up. 
+Next, you delete the CloudFormation template that created the MSK cluster and the EC2 instance by going to the CloudFormation console and selecting the stack. Then select the "Delete" button. Please note that it might take a while to complete. ---- Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/msk-lambda-iam-python-sam/README.md b/msk-lambda-iam-python-sam/README.md index 63a20fd7a..6cffdd32f 100644 --- a/msk-lambda-iam-python-sam/README.md +++ b/msk-lambda-iam-python-sam/README.md @@ -7,7 +7,7 @@ This project contains source code and supporting files for a serverless applicat - HandlerKafka - Code for the application's Lambda function. - events - Invocation events that you can use to invoke the function. - template_original.yaml - An AWS SAM template that defines the application's AWS resources. -- MSKAndKafkaClientEC2.yaml - A Cloudformation template file that can be used to deploy an MSK cluster and also deploy an EC2 machine with all pre-requisities already installed, so you can directly build and deploy the lambda function and test it out. +- MSKAndKafkaClientEC2.yaml - An AWS CloudFormation template file that can be used to deploy an MSK cluster and also deploy an Amazon EC2 instance with all prerequisites already installed, so you can directly build and deploy the Lambda function and test it out. The application creates a Lambda function that listens to Kafka messages on a topic of an MSK Cluster. These resources are defined in the `template_original.yaml` file in this project. You can update the template to add AWS resources through the same deployment process that updates your application code. @@ -17,34 +17,34 @@ Important: this application uses various AWS services and there are costs associ * [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in. 
The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. -## Run the Cloudformation template to create the MSK Cluster and Client EC2 machine +## Deploy the CloudFormation template to create the MSK Cluster and Client EC2 instance -* [Run the Cloudformation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS Cloudformation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the Cloudformation stack to be created. This Cloudformation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 machine that you can use as a client. +* [Deploy the CloudFormation template using the file MSKAndKafkaClientEC2.yaml] - You can go to the AWS CloudFormation console, create a new stack by specifying the template file. You can keep the defaults for input parameters or modify them as necessary. Wait for the CloudFormation stack to be created. This CloudFormation template will create an MSK cluster (Provisioned or Serverless based on your selection). It will also create an EC2 instance that you can use as a client. -* [Connect to the EC2 machine] - Once the Cloudformation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. -Note: You may need to wait for some time after the Cloudformation stack is created, as some UserData scripts continue running after the Cloudformation stack shows Created. +* [Connect to the EC2 instance] - Once the CloudFormation stack is created, you can go to the EC2 console and log into the machine using either "Connect using EC2 Instance Connect" or "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. 
+Note: You may need to wait for some time after the CloudFormation stack is created, as some UserData scripts continue running after the CloudFormation stack shows Created. -* [Check if Kafka Topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamPythonLambdaTopic." +* [Check if Kafka Topic has been created] - Once you are inside the EC2 instance, you should be in the /home/ec2-user folder. Check to see the contents of the file kafka_topic_creator_output.txt by running the command cat kafka_topic_creator_output.txt. You should see an output such as "Created topic MskIamPythonLambdaTopic." -If you are not able to find the file kafka_topic_creator_output.txt or if it is blank or you see an error message, then you need to run the file ./kafka_topic_creator.sh. This file runs a script that goes and creates the Kafka topic that the Lambda function will subscribe to. +If you are not able to find the file `kafka_topic_creator_output.txt` or if it is blank or you see an error message, then you need to run `./kafka_topic_creator.sh`. This runs a script that creates the Kafka topic that the Lambda function will subscribe to. ## Pre-requisites to Deploy the sample Lambda function -The EC2 machine that was created by running the Cloudformation template has all the software that will be needed to deploy the Lambda function. +The EC2 instance that was created by running the CloudFormation template has all the software that will be needed to deploy the Lambda function. The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. 
-* Python3 - We have installed the version of Python3 that you picked up at the time of specifying the input parameters to the Cloudformation template +* Python3 - We have installed the version of Python3 that you picked up at the time of specifying the input parameters to the CloudFormation template * AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) -* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) +* Docker - We have installed the Docker Community Edition on the EC2 instance (https://hub.docker.com/search/?type=edition&offering=community) -We have also cloned the Github repository for serverless-patterns on the EC2 machine already by running the below command +We have also cloned the Github repository for serverless-patterns on the EC2 instance already by running the below command ``` git clone https://github.com/aws-samples/serverless-patterns.git ``` Change directory to the pattern directory: ``` - cd serverless-patterns/msk-lambda-iam-python-sam + cd serverless-patterns/msk-lambda-iam-python-sam ``` ## Use the SAM CLI to build and test locally @@ -147,7 +147,7 @@ REPORT RequestId: 5c10310a-abf9-416e-b017-697d2c3ba097 Init Duration: 6.68 ms Du To deploy your application for the first time, run the following in your shell: ```bash -sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-python-sam --guided +sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-iam-python-sam --guided ``` The sam deploy command will package and deploy your application to AWS, with a series of prompts. 
You can accept all the defaults by hitting Enter: @@ -156,7 +156,7 @@ The sam deploy command will package and deploy your application to AWS, with a s * **AWS Region**: The AWS region you want to deploy your app to. * **Parameter MSKClusterName**: The name of the MSKCluster * **Parameter MSKClusterId**: The unique ID of the MSKCluster -* **Parameter MSKTopic**: The Kafka topic on which the lambda function will listen on +* **Parameter MSKTopic**: The Kafka topic on which the Lambda function will listen on * **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. If set to no, the AWS SAM CLI will automatically deploy application changes. * **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. By default, these are scoped down to minimum required permissions. To deploy an AWS CloudFormation stack which creates or modifies IAM roles, the `CAPABILITY_IAM` value for `capabilities` must be provided. If permission isn't provided through this prompt, to deploy this example you must explicitly pass `--capabilities CAPABILITY_IAM` to the `sam deploy` command. * **Disable rollback**: Defaults to No and it preserves the state of previously provisioned resources when an operation fails @@ -174,11 +174,11 @@ This pattern creates a Lambda function along with a Lambda Event Source Mapping( ## Test the sample application -Once the lambda function is deployed, send some Kafka messages on the topic that the lambda function is listening on, on the MSK server. +Once the Lambda function is deployed, send some Kafka messages to the topic that the Lambda function is listening on, on the MSK server. -For your convenience, a script has been created on the EC2 machine that was provisioned using Cloudformation. 
+For your convenience, a script has been created on the EC2 instance that was provisioned using CloudFormation. -cd /home/ec2-user +`cd /home/ec2-user` You should see a script called kafka_message_sender.sh. Run that script and you should be able to send a new Kafka message in every line as shown below @@ -212,17 +212,17 @@ The code in this example prints out the fields in the Kafka message and also dec ## Cleanup -You can first clean-up the Lambda function by running the sam delete command +You can first clean-up the Lambda function by running the `sam delete` command ``` -cd /home/ec2-user/serverless-patterns/msk-lambda-iam-python-sam +cd /home/ec2-user/serverless-patterns/msk-lambda-iam-python-sam sam delete ``` confirm by pressing y for both the questions -You should see the lambda function getting deleted and a final confirmation "Deleted successfully" on the command-line +You should see the Lambda function getting deleted and a final confirmation "Deleted successfully" on the command line -Next you need to delete the Cloudformation template that created the MSK Server and the EC2 machine by going to the Cloudformation console and selecting the stack and then hitting the "Delete" button. It will run for sometime but eventually you should see the stack getting cleaned up. +Next, you delete the CloudFormation template that created the MSK cluster and the EC2 instance by going to the CloudFormation console and selecting the stack. Then select the "Delete" button. Please note that it might take a while to complete. ---- Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
From fd57c4828895170448cf7ecd9ae1bb94752a6513 Mon Sep 17 00:00:00 2001 From: indranilbanerjeeawssa Date: Fri, 31 Jan 2025 00:07:26 -0800 Subject: [PATCH 11/11] Fixed the example-pattern.json files to remove reference to broken CloudFormation template and README.md files to add note about existing MSK clusters in msk-lambda-iam-node-sam and msk-lambda-iam-python-sam patterns --- msk-lambda-iam-node-sam/README.md | 2 ++ msk-lambda-iam-node-sam/example-pattern.json | 22 +++++++--------- msk-lambda-iam-python-sam/README.md | 2 ++ .../example-pattern.json | 26 ++++++++----------- 4 files changed, 24 insertions(+), 28 deletions(-) diff --git a/msk-lambda-iam-node-sam/README.md b/msk-lambda-iam-node-sam/README.md index b295ebdda..cf21d4dfd 100644 --- a/msk-lambda-iam-node-sam/README.md +++ b/msk-lambda-iam-node-sam/README.md @@ -132,6 +132,8 @@ The sam deploy command will package and deploy your application to AWS, with a s * **SAM configuration environment [default]**: Environment for storing deployment information locally You should get a message "Successfully created/updated stack - in " if all goes well + +**Note: In case you want to deploy the Lambda function by pointing to an existing MSK Cluster and not the one created by running the CloudFormation template provided in this pattern, you will need to modify the values of the parameters MSKClusterName and MSKClusterId accordingly** Once you have run `sam deploy --guided` mode once and saved arguments to a configuration file (samconfig.toml), you can use `sam deploy` in future to use these defaults. 
diff --git a/msk-lambda-iam-node-sam/example-pattern.json b/msk-lambda-iam-node-sam/example-pattern.json index f64050116..6e2c5e4a8 100644 --- a/msk-lambda-iam-node-sam/example-pattern.json +++ b/msk-lambda-iam-node-sam/example-pattern.json @@ -8,10 +8,10 @@ "headline": "How it works", "text": [ "This pattern provides a Lambda function along with an Event Source Mapping to a Kafka topic.", - "It requires that you already have an Amazon Managed Streaming for Kafka(Amazon MSK) cluster setup with a topic created. If you don't already have an MSK cluster ", - "you can use the example in this pattern https://serverlessland.com/patterns/msk-cfn-sasl-lambda (linked in the resources) to deploy a cluster.", + "It requires that you already have an Amazon Managed Streaming for Kafka(Amazon MSK) cluster setup with a Kafka topic created.", + "The CloudFormation template provided in this pattern installs an MSK Cluster and creates a Kafka topic.", "This pattern works with either a Provisioned or Serverless MSK cluster as long as the cluster is configured to use IAM authentication. ", - "For detailed deployment instructions instructions see the README " + "For detailed deployment instructions see the README.md" ] }, "gitHub": { @@ -24,10 +24,6 @@ }, "resources": { "bullets": [ - { - "text": "Amazon MSK Cluster pattern", - "link": "https://serverlessland.com/patterns/msk-cfn-sasl-lambda" - }, { "text": "Using AWS Lambda with Amazon MSK", "link": "https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html" @@ -58,6 +54,11 @@ ] }, "authors": [ + { + "name": "Indranil Banerjee", + "bio": "AWS - Senior Solutions Architect", + "linkedin": "https://www.linkedin.com/in/indranil-banerjee-b00a261/" + }, { "name": "Vaibhav Jain", "bio": "AWS - Sr. 
Application Architect", @@ -70,7 +71,7 @@ "image": "https://www.fintail.me/images/pa.jpg", "linkedin": "https://www.linkedin.com/in/pallam/" }, - { + { "name": "Suraj Tripathi", "bio": "AWS - AppDev Cloud Consultant", "linkedin": "https://www.linkedin.com/in/suraj-tripathi-01b49a140/" @@ -79,11 +80,6 @@ "name": "Adam Wagner", "bio": "AWS - Principal Serverless Solutions Architect", "linkedin": "https://www.linkedin.com/in/adam-wagner-4bb412/" - }, - { - "name": "Indranil Banerjee", - "bio": "AWS - Senior Solutions Architect", - "linkedin": "https://www.linkedin.com/in/indranil-banerjee-b00a261/" } ] } diff --git a/msk-lambda-iam-python-sam/README.md b/msk-lambda-iam-python-sam/README.md index 6cffdd32f..6a6729a8c 100644 --- a/msk-lambda-iam-python-sam/README.md +++ b/msk-lambda-iam-python-sam/README.md @@ -166,6 +166,8 @@ The sam deploy command will package and deploy your application to AWS, with a s You should get a message "Successfully created/updated stack - in " if all goes well +**Note: In case you want to deploy the Lambda function by pointing to an existing MSK Cluster and not the one created by running the CloudFormation template provided in this pattern, you will need to modify the values of the parameters MSKClusterName and MSKClusterId accordingly** + Once you have run `sam deploy --guided` mode once and saved arguments to a configuration file (samconfig.toml), you can use `sam deploy` in future to use these defaults. 
## How it works diff --git a/msk-lambda-iam-python-sam/example-pattern.json b/msk-lambda-iam-python-sam/example-pattern.json index 4bff6743a..b391e8abf 100644 --- a/msk-lambda-iam-python-sam/example-pattern.json +++ b/msk-lambda-iam-python-sam/example-pattern.json @@ -7,11 +7,11 @@ "introBox": { "headline": "How it works", "text": [ - "This pattern provides a Lambda function along with an Event Source Mapping to a Kafka topic.", - "It requires that you already have an Amazon Managed Streaming for Kafka(Amazon MSK) cluster setup with a topic created. If you don't already have an MSK cluster ", - "you can use the example in this pattern https://serverlessland.com/patterns/msk-cfn-sasl-lambda (linked in the resources) to deploy a cluster.", - "This pattern works with either a Provisioned or Serverless MSK cluster as long as the cluster is configured to use IAM authentication. ", - "For detailed deployment instructions instructions see the README " + "This pattern provides a Lambda function along with an Event Source Mapping to a Kafka topic.", + "It requires that you already have an Amazon Managed Streaming for Kafka(Amazon MSK) cluster setup with a Kafka topic created.", + "The CloudFormation template provided in this pattern installs an MSK Cluster and creates a Kafka topic.", + "This pattern works with either a Provisioned or Serverless MSK cluster as long as the cluster is configured to use IAM authentication. 
", + "For detailed deployment instructions see the README.md" ] }, "gitHub": { @@ -24,10 +24,6 @@ }, "resources": { "bullets": [ - { - "text": "Amazon MSK Cluster pattern", - "link": "https://serverlessland.com/patterns/msk-cfn-sasl-lambda" - }, { "text": "Using AWS Lambda with Amazon MSK", "link": "https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html" @@ -58,6 +54,11 @@ ] }, "authors": [ + { + "name": "Indranil Banerjee", + "bio": "AWS - Senior Solutions Architect", + "linkedin": "https://www.linkedin.com/in/indranil-banerjee-b00a261/" + }, { "name": "Vaibhav Jain", "bio": "AWS - Sr. Application Architect", @@ -70,7 +71,7 @@ "image": "https://www.fintail.me/images/pa.jpg", "linkedin": "https://www.linkedin.com/in/pallam/" }, - { + { "name": "Suraj Tripathi", "bio": "AWS - AppDev Cloud Consultant", "linkedin": "https://www.linkedin.com/in/suraj-tripathi-01b49a140/" @@ -79,11 +80,6 @@ "name": "Adam Wagner", "bio": "AWS - Principal Serverless Solutions Architect", "linkedin": "https://www.linkedin.com/in/adam-wagner-4bb412/" - }, - { - "name": "Indranil Banerjee", - "bio": "AWS - Senior Solutions Architect", - "linkedin": "https://www.linkedin.com/in/indranil-banerjee-b00a261/" } ] }