diff --git a/msk-lambda-schema-avro-java-sam/MSKAndKafkaClientEC2.yaml b/msk-lambda-schema-avro-java-sam/MSKAndKafkaClientEC2.yaml new file mode 100644 index 000000000..53cb70b51 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/MSKAndKafkaClientEC2.yaml @@ -0,0 +1,1347 @@ +AWSTemplateFormatVersion: '2010-09-09' +Parameters: + EnvType: + Description: MSK Cluster Type. + Default: Provisioned + Type: String + AllowedValues: + - Serverless + - Provisioned + ConstraintDescription: Must specify Serverless or Provisioned. + LatestAmiId: + Type: 'AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>' + Default: '/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-6.1-x86_64' + MSKKafkaVersion: + Type: String + Default: 3.9.x + JavaVersion: + Type: String + Description: Choose the version of Java. Lambda currently supports Java 11, 17 and 21 + AllowedValues: + - java11 + - java17 + - java21 + Default: java21 + ApacheKafkaInstallerLocation: + Type: String + Default: https://dlcdn.apache.org/kafka/3.9.1/kafka_2.13-3.9.1.tgz + KafkaTopicForLambda: + Type: String + Default: MskIamJavaLambdaTopic + ServerlessLandGithubLocation: + Type: String + Default: https://github.com/aws-samples/serverless-patterns/ + ContactSchemaName: + Type: String + Default: ContactSchema + GlueSchemaRegistryForMSKName: + Type: String + Default: GlueSchemaRegistryForMSK +
+Conditions: + CreateProvisionedCluster: !Equals + - !Ref EnvType + - Provisioned + CreateServerlessCluster: !Equals + - !Ref EnvType + - Serverless +Mappings: + SubnetConfig: + VPC: + CIDR: '10.0.0.0/16' + PublicOne: + CIDR: '10.0.0.0/24' + PrivateSubnetMSKOne: + CIDR: '10.0.1.0/24' + PrivateSubnetMSKTwo: + CIDR: '10.0.2.0/24' + PrivateSubnetMSKThree: + CIDR: '10.0.3.0/24' +Resources: + VPC: + Type: AWS::EC2::VPC + Properties: + EnableDnsSupport: true + EnableDnsHostnames: true + CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] + Tags: + - Key: 'Name' + Value: 'MSKVPC' +
+ PublicSubnetOne: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR'] + MapPublicIpOnLaunch: true + Tags: + - Key: 'Name' + Value: 'PublicSubnet' + PrivateSubnetMSKOne: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKOne', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKOne' + PrivateSubnetMSKTwo: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 1 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKTwo', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKTwo' + PrivateSubnetMSKThree: + Type: AWS::EC2::Subnet + Properties: + AvailabilityZone: + Fn::Select: + - 2 + - Fn::GetAZs: {Ref: 'AWS::Region'} + VpcId: !Ref 'VPC' + CidrBlock: !FindInMap ['SubnetConfig', 'PrivateSubnetMSKThree', 'CIDR'] + MapPublicIpOnLaunch: false + Tags: + - Key: 'Name' + Value: 'PrivateSubnetMSKThree' +
+ InternetGateway: + Type: AWS::EC2::InternetGateway + GatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref 'VPC' + InternetGatewayId: !Ref 'InternetGateway' +
+ NATEIP: + Type: AWS::EC2::EIP + DependsOn: GatewayAttachment + Properties: + Domain: vpc +
+ NATGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt NATEIP.AllocationId
+ SubnetId: !Ref 'PublicSubnetOne' + Tags: + - Key: 'Name' + Value: 'MSKNATGateway' +
+ PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref 'VPC' + PublicRoute: + Type: AWS::EC2::Route + DependsOn: GatewayAttachment + Properties: + RouteTableId: !Ref 'PublicRouteTable' + DestinationCidrBlock: '0.0.0.0/0' + GatewayId: !Ref 'InternetGateway' + PublicSubnetOneRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnetOne + RouteTableId: !Ref PublicRouteTable +
+ PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref 'VPC' +
+ PrivateRoute: + Type: AWS::EC2::Route + DependsOn: NATGateway + Properties: + RouteTableId: !Ref 'PrivateRouteTable' + DestinationCidrBlock: '0.0.0.0/0' + NatGatewayId: !Ref 'NATGateway' +
+ PrivateSubnetMSKOneRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKOne + PrivateSubnetMSKTwoRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKTwo + PrivateSubnetMSKThreeRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnetMSKThree +
+ KafkaClientInstanceSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Enable SSH access via port 22 from BastionHostSecurityGroup + GroupName: !Sub "${AWS::StackName} Security group attached to the kafka client producer" + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3500 + ToPort: 3500 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3600 + ToPort: 3600 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3800 + ToPort: 3800 + CidrIp: 10.0.0.0/24 + - IpProtocol: tcp + FromPort: 3900 + ToPort: 3900 + CidrIp: 10.0.0.0/24 +
+ MSKSecurityGroup: + Type: AWS::EC2::SecurityGroup + DependsOn: [VPC,KafkaClientInstanceSecurityGroup] + Properties: + GroupDescription: MSK Security Group + GroupName: !Sub "${AWS::StackName} Security group for the MSK cluster" + VpcId: !Ref 'VPC' + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 2181 + ToPort: 2181 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9094 + ToPort: 9094 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9096 + ToPort: 9096 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9092 + ToPort: 9092 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 9098 + ToPort: 9098 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 8083 + ToPort: 8083 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + - IpProtocol: tcp + FromPort: 8081 + ToPort: 8081 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId +
+ MSKSelfIngressAllowRule: + Type: AWS::EC2::SecurityGroupIngress + DependsOn: MSKSecurityGroup + Properties: + GroupId: !GetAtt MSKSecurityGroup.GroupId + Description: Enable self-referencing access to the bootstrap servers + IpProtocol: tcp + FromPort: 9092 + ToPort: 9098 + SourceSecurityGroupId: !GetAtt MSKSecurityGroup.GroupId +
+ KafkaClientSelfIngressAllowRule: +
Type: AWS::EC2::SecurityGroupIngress + DependsOn: KafkaClientInstanceSecurityGroup + Properties: + GroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + IpProtocol: tcp + FromPort: 22 + ToPort: 22 + SourceSecurityGroupId: !GetAtt KafkaClientInstanceSecurityGroup.GroupId + + MSKGlueRegistry: + Type: AWS::Glue::Registry + Properties: + Name: !Ref GlueSchemaRegistryForMSKName + Description: "Registry for storing schemas related to MSK" + + ContactSchema: + Type: AWS::Glue::Schema + Properties: + Name: !Ref ContactSchemaName + Compatibility: BACKWARD + DataFormat: AVRO + Registry: + Arn: !GetAtt MSKGlueRegistry.Arn + SchemaDefinition: | + { + "type": "record", + "name": "Contact", + "fields": [ + {"name": "firstname", "type": "string"}, + {"name": "lastname", "type": "string"}, + {"name": "company", "type": "string"}, + {"name": "street", "type": "string"}, + {"name": "city", "type": "string"}, + {"name": "county", "type": "string"}, + {"name": "state", "type": "string"}, + {"name": "zip", "type": "string"}, + {"name": "homePhone", "type": "string"}, + {"name": "cellPhone", "type": "string"}, + {"name": "email", "type": "string"}, + {"name": "website", "type": "string"} + ] + } + + KafkaClientEC2InstanceProvisioned: + Condition: CreateProvisionedCluster + DependsOn: MSKCluster + Type: AWS::EC2::Instance + Properties: + InstanceType: m5.large + IamInstanceProfile: !Ref EC2InstanceProfile + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + SubnetId: !Ref PublicSubnetOne + SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] + ImageId: !Ref LatestAmiId + Tags: + - Key: 'Name' + Value: 'KafkaClientInstance' + BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 50 + VolumeType: gp2 + DeleteOnTermination: true + UserData: + Fn::Base64: + !Sub + - | + #!/bin/bash + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum update -y + # yum install java-openjdk11-devel -y + + # install Java + JAVA_VERSION=${java_version} + echo "JAVA_VERSION=$JAVA_VERSION" >> /home/ec2-user/.bash_profile + if [ "$JAVA_VERSION" == "java11" ]; then + sudo yum install java-11-amazon-corretto-devel -y + elif [ "$JAVA_VERSION" == "java17" ]; then + sudo yum install java-17-amazon-corretto-devel -y + elif [ "$JAVA_VERSION" == "java21" ]; then + sudo yum install java-21-amazon-corretto-devel -y + else + sudo yum install java-21-amazon-corretto-devel -y + fi + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of Java succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install nmap-ncat -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of nmap succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install git -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of git succeeded" + success=true + else + echo "Attempt $attempt_num failed. 
Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum erase awscli -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum erase of awscli succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install jq -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of jq succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y docker + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of docker succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + service docker start + usermod -a -G docker ec2-user + sudo wget https://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo -O /etc/yum.repos.d/epel-apache-maven.repo + sudo sed -i s/\$releasever/6/g /etc/yum.repos.d/epel-apache-maven.repo + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y apache-maven + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of maven succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + + cd /home/ec2-user + su -c "ln -s /usr/bin/python3.8 /usr/bin/python3" -s /bin/sh ec2-user + su -c "pip3 install boto3 --user" -s /bin/sh ec2-user + su -c "pip3 install kafka-python --user" -s /bin/sh ec2-user + + # install AWS CLI 2 - access with aws2 + cd /home/ec2-user + mkdir -p awscli + cd awscli + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + # Create dirs, get Apache Kafka and unpack it + cd /home/ec2-user + KAFKA_VERSION=${msk_kafka_version} + KAFKA_FOLDER_VERSION=$(echo "$KAFKA_VERSION" | tr -d '.') + KAFKA_FOLDER='Kafka'$KAFKA_FOLDER_VERSION + mkdir -p $KAFKA_FOLDER + mkdir -p /tmp/kafka + ln -s /home/ec2-user/$KAFKA_FOLDER /home/ec2-user/kafka + cd $KAFKA_FOLDER + APACHE_KAFKA_INSTALLER_LOCATION=${apache_kafka_installer_location} + wget $APACHE_KAFKA_INSTALLER_LOCATION + APACHE_KAFKA_INSTALLER_FILE=$(echo "$APACHE_KAFKA_INSTALLER_LOCATION" | awk -F "/" '{print $NF}') + tar -xzf $APACHE_KAFKA_INSTALLER_FILE --strip 1 + cd libs + wget https://github.com/aws/aws-msk-iam-auth/releases/download/v2.2.0/aws-msk-iam-auth-2.2.0-all.jar + cd ../bin + echo "security.protocol=SASL_SSL" > client.properties + echo "sasl.mechanism=AWS_MSK_IAM" >> client.properties + echo "sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;" >> client.properties + echo "sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler" >> client.properties + + # Install AWS SAM CLI + cd /home/ec2-user + mkdir -p awssam + cd awssam + wget https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip aws-sam-cli-linux-x86_64.zip -d sam-installation + sudo ./sam-installation/install + + # Create command files for creating Kafka Topic and Kafka Producer + cd /home/ec2-user + MSK_CLUSTER_ARN=${msk_cluster_arn} + KAFKA_TOPIC=${kafka_topic_for_lambda} + echo "#!/bin/bash" > kafka_topic_creator.sh + sudo chmod +x kafka_topic_creator.sh + echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh + AWS_REGION=${aws_region} + echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh + echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh + echo "sleep 5" >> kafka_topic_creator.sh + echo "KAFKA_TOPIC=$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "/home/ec2-user/kafka/bin/kafka-topics.sh --create --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --command-config /home/ec2-user/kafka/bin/client.properties --replication-factor 3 --partitions 3 --topic \$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "echo \"export MSK_CLUSTER_ARN=\$MSK_CLUSTER_ARN\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export AWS_REGION=\$AWS_REGION\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export BOOTSTRAP_BROKERS_IAM=\$BOOTSTRAP_BROKERS_IAM\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export KAFKA_TOPIC=\$KAFKA_TOPIC\" >> .bash_profile" >> kafka_topic_creator.sh + echo "#!/bin/bash" > kafka_message_sender.sh + echo "source /home/ec2-user/.bash_profile" >> kafka_message_sender.sh + echo "/home/ec2-user/kafka/bin/kafka-console-producer.sh --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --producer.config /home/ec2-user/kafka/bin/client.properties --topic $KAFKA_TOPIC" >> kafka_message_sender.sh + sudo chmod +x kafka_message_sender.sh + CLUSTER_NAME="$(echo 
$MSK_CLUSTER_ARN | cut -d '/' -f2)" + CLUSTER_ID="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f3)" + echo "export CLUSTER_NAME=$CLUSTER_NAME" >> /home/ec2-user/.bash_profile + echo "export CLUSTER_ID=$CLUSTER_ID" >> /home/ec2-user/.bash_profile + ./kafka_topic_creator.sh > kafka_topic_creator_output.txt + + #Checkout Serverless Patterns from Github + cd /home/ec2-user + SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location} + git clone $SERVERLESS_LAND_GITHUB_LOCATION + cd ./serverless-patterns/msk-lambda-schema-avro-java-sam + cp template_original.yaml template.yaml + sudo chown -R ec2-user . + + GLUE_SCHEMA_REGISTRY_NAME=${glue_registry_name} + CONTACT_SCHEMA=${contact_schema_name} + VPC_ID=${vpcid} + LAMBDA_SECURITY_GROUP_ID=${securitygroup} + PRIVATE_SUBNET_1=${privatesubnetone} + PRIVATE_SUBNET_2=${privatesubnettwo} + PRIVATE_SUBNET_3=${privatesubnetthree} + SUBNET_IDS="$PRIVATE_SUBNET_1,$PRIVATE_SUBNET_2,$PRIVATE_SUBNET_3" + + source /home/ec2-user/.bash_profile + sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml + sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml + sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + sed -i "s/JAVA_VERSION/$JAVA_VERSION/g" template.yaml + sed -i "s/GLUE_SCHEMA_REGISTRY_NAME/$GLUE_SCHEMA_REGISTRY_NAME/g" template.yaml + sed -i "s/AVRO_SCHEMA/$CONTACT_SCHEMA/g" template.yaml + sed -i "s/VPC_ID/$VPC_ID/g" template.yaml + sed -i "s/LAMBDA_SECURITY_GROUP_ID/$LAMBDA_SECURITY_GROUP_ID/g" template.yaml + sed -i "s/SUBNET_IDS/$SUBNET_IDS/g" template.yaml + + # Get IP CIDR range for EC2 Instance Connect + cd /home/ec2-user + mkdir -p ip_prefix + cd ip_prefix + git clone https://github.com/joetek/aws-ip-ranges-json.git + cd aws-ip-ranges-json + AWS_REGION=${aws_region} + EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') + echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile + SECURITY_GROUP=${security_group_id} + echo "export SECURITY_GROUP=$SECURITY_GROUP" >> /home/ec2-user/.bash_profile + aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP + + - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId + msk_cluster_arn : !GetAtt MSKCluster.Arn + kafka_topic_for_lambda : !Ref KafkaTopicForLambda + msk_kafka_version: !Ref MSKKafkaVersion + apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation + serverless_land_github_location: !Ref ServerlessLandGithubLocation + aws_region: !Ref 'AWS::Region' + java_version: !Ref JavaVersion + vpcid: !Ref VPC + privatesubnetone: !Ref PrivateSubnetMSKOne + privatesubnettwo: !Ref PrivateSubnetMSKTwo + privatesubnetthree: !Ref PrivateSubnetMSKThree + securitygroup: !GetAtt MSKSecurityGroup.GroupId + glue_registry_name: !Ref GlueSchemaRegistryForMSKName + contact_schema_name: !Ref ContactSchemaName + + + KafkaClientEC2InstanceServerless: + Condition: CreateServerlessCluster + DependsOn: ServerlessMSKCluster + Type: AWS::EC2::Instance + Properties: + InstanceType: m5.large + IamInstanceProfile: !Ref EC2InstanceProfile + AvailabilityZone: + Fn::Select: + - 0 + - Fn::GetAZs: {Ref: 'AWS::Region'} + SubnetId: !Ref PublicSubnetOne + SecurityGroupIds: [!GetAtt KafkaClientInstanceSecurityGroup.GroupId] + ImageId: !Ref LatestAmiId + Tags: + - Key: 'Name' + Value: 'KafkaClientInstance' + BlockDeviceMappings: + - DeviceName: /dev/xvda + Ebs: + VolumeSize: 50 + VolumeType: gp2 + DeleteOnTermination: 
true + UserData: + Fn::Base64: + !Sub + - | + #!/bin/bash + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum update -y + # yum install java-openjdk11-devel -y + + # install Java + JAVA_VERSION=${java_version} + echo "JAVA_VERSION=$JAVA_VERSION" >> /home/ec2-user/.bash_profile + if [ "$JAVA_VERSION" == "java11" ]; then + sudo yum install java-11-amazon-corretto-devel -y + elif [ "$JAVA_VERSION" == "java17" ]; then + sudo yum install java-17-amazon-corretto-devel -y + elif [ "$JAVA_VERSION" == "java21" ]; then + sudo yum install java-21-amazon-corretto-devel -y + else + sudo yum install java-21-amazon-corretto-devel -y + fi + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of Java succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install nmap-ncat -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of nmap succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install git -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of git succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum erase awscli -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum erase of awscli succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + yum install jq -y + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of jq succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y docker + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of docker succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." 
+ sleep 3 + ((attempt_num++)) + fi + done + + service docker start + usermod -a -G docker ec2-user + sudo wget https://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo -O /etc/yum.repos.d/epel-apache-maven.repo + sudo sed -i s/\$releasever/6/g /etc/yum.repos.d/epel-apache-maven.repo + + max_attempts=5 + attempt_num=1 + success=false + while [ $success = false ] && [ $attempt_num -le $max_attempts ]; do + echo "Trying yum install" + sudo yum install -y apache-maven + # Check the exit code of the command + if [ $? -eq 0 ]; then + echo "Yum install of maven succeeded" + success=true + else + echo "Attempt $attempt_num failed. Sleeping for 3 seconds and trying again..." + sleep 3 + ((attempt_num++)) + fi + done + + cd /home/ec2-user + su -c "ln -s /usr/bin/python3.8 /usr/bin/python3" -s /bin/sh ec2-user + su -c "pip3 install boto3 --user" -s /bin/sh ec2-user + su -c "pip3 install kafka-python --user" -s /bin/sh ec2-user + + # install AWS CLI 2 - access with aws2 + cd /home/ec2-user + mkdir -p awscli + cd awscli + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + # Create dirs, get Apache Kafka and unpack it + cd /home/ec2-user + KAFKA_VERSION=${msk_kafka_version} + KAFKA_FOLDER_VERSION=$(echo "$KAFKA_VERSION" | tr -d '.') + KAFKA_FOLDER='Kafka'$KAFKA_FOLDER_VERSION + mkdir -p $KAFKA_FOLDER + mkdir -p /tmp/kafka + ln -s /home/ec2-user/$KAFKA_FOLDER /home/ec2-user/kafka + cd $KAFKA_FOLDER + APACHE_KAFKA_INSTALLER_LOCATION=${apache_kafka_installer_location} + wget $APACHE_KAFKA_INSTALLER_LOCATION + APACHE_KAFKA_INSTALLER_FILE=$(echo "$APACHE_KAFKA_INSTALLER_LOCATION" | awk -F "/" '{print $NF}') + tar -xzf $APACHE_KAFKA_INSTALLER_FILE --strip 1 + cd libs + wget https://github.com/aws/aws-msk-iam-auth/releases/download/v2.2.0/aws-msk-iam-auth-2.2.0-all.jar + cd ../bin + echo "security.protocol=SASL_SSL" > client.properties + echo "sasl.mechanism=AWS_MSK_IAM" >> client.properties + echo "sasl.jaas.config=software.amazon.msk.auth.iam.IAMLoginModule required;" >> client.properties + echo "sasl.client.callback.handler.class=software.amazon.msk.auth.iam.IAMClientCallbackHandler" >> client.properties + + # Install AWS SAM CLI + cd /home/ec2-user + mkdir -p awssam + cd awssam + wget https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip + unzip aws-sam-cli-linux-x86_64.zip -d sam-installation + sudo ./sam-installation/install + + # Create command files for creating Kafka Topic and Kafka Producer + cd /home/ec2-user + MSK_CLUSTER_ARN=${msk_cluster_arn} + KAFKA_TOPIC=${kafka_topic_for_lambda} + echo "#!/bin/bash" > kafka_topic_creator.sh + sudo chmod +x kafka_topic_creator.sh + echo "MSK_CLUSTER_ARN=$MSK_CLUSTER_ARN" >> kafka_topic_creator.sh + AWS_REGION=${aws_region} + echo "AWS_REGION=$AWS_REGION" >> kafka_topic_creator.sh + echo "BOOTSTRAP_BROKERS_IAM=\$(aws kafka get-bootstrap-brokers --region \$AWS_REGION --cluster-arn \$MSK_CLUSTER_ARN --query 'BootstrapBrokerStringSaslIam' --output text)" >> kafka_topic_creator.sh + echo "sleep 5" >> kafka_topic_creator.sh + echo "KAFKA_TOPIC=$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "/home/ec2-user/kafka/bin/kafka-topics.sh --create --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --command-config /home/ec2-user/kafka/bin/client.properties --replication-factor 3 --partitions 3 --topic \$KAFKA_TOPIC" >> kafka_topic_creator.sh + echo "echo \"export MSK_CLUSTER_ARN=\$MSK_CLUSTER_ARN\" >> .bash_profile" >> kafka_topic_creator.sh + echo 
"echo \"export AWS_REGION=\$AWS_REGION\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export BOOTSTRAP_BROKERS_IAM=\$BOOTSTRAP_BROKERS_IAM\" >> .bash_profile" >> kafka_topic_creator.sh + echo "echo \"export KAFKA_TOPIC=\$KAFKA_TOPIC\" >> .bash_profile" >> kafka_topic_creator.sh + echo "#!/bin/bash" > kafka_message_sender.sh + echo "source /home/ec2-user/.bash_profile" >> kafka_message_sender.sh + echo "/home/ec2-user/kafka/bin/kafka-console-producer.sh --bootstrap-server \$BOOTSTRAP_BROKERS_IAM --producer.config /home/ec2-user/kafka/bin/client.properties --topic $KAFKA_TOPIC" >> kafka_message_sender.sh + sudo chmod +x kafka_message_sender.sh + CLUSTER_NAME="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f2)" + CLUSTER_ID="$(echo $MSK_CLUSTER_ARN | cut -d '/' -f3)" + echo "export CLUSTER_NAME=$CLUSTER_NAME" >> /home/ec2-user/.bash_profile + echo "export CLUSTER_ID=$CLUSTER_ID" >> /home/ec2-user/.bash_profile + ./kafka_topic_creator.sh > kafka_topic_creator_output.txt + + #Checkout Serverless Patterns from Github + cd /home/ec2-user + SERVERLESS_LAND_GITHUB_LOCATION=${serverless_land_github_location} + git clone $SERVERLESS_LAND_GITHUB_LOCATION + cd ./serverless-patterns/msk-lambda-schema-avro-java-sam + cp template_original.yaml template.yaml + sudo chown -R ec2-user . + + GLUE_SCHEMA_REGISTRY_NAME=${glue_registry_name} + CONTACT_SCHEMA=${contact_schema_name} + VPC_ID=${vpcid} + LAMBDA_SECURITY_GROUP_ID=${securitygroup} + PRIVATE_SUBNET_1=${privatesubnetone} + PRIVATE_SUBNET_2=${privatesubnettwo} + PRIVATE_SUBNET_3=${privatesubnetthree} + SUBNET_IDS="$PRIVATE_SUBNET_1,$PRIVATE_SUBNET_2,$PRIVATE_SUBNET_3" + + + source /home/ec2-user/.bash_profile + sed -i "s/CLUSTER_NAME/$CLUSTER_NAME/g" template.yaml + sed -i "s/CLUSTER_ID/$CLUSTER_ID/g" template.yaml + sed -i "s/KAFKA_TOPIC/$KAFKA_TOPIC/g" template.yaml + sed -i "s/JAVA_VERSION/$JAVA_VERSION/g" template.yaml + sed -i "s/GLUE_SCHEMA_REGISTRY_NAME/$GLUE_SCHEMA_REGISTRY_NAME/g" template.yaml + sed -i "s/AVRO_SCHEMA/$CONTACT_SCHEMA/g" template.yaml + sed -i "s/VPC_ID/$VPC_ID/g" template.yaml + sed -i "s/LAMBDA_SECURITY_GROUP_ID/$LAMBDA_SECURITY_GROUP_ID/g" template.yaml + sed -i "s/SUBNET_IDS/$SUBNET_IDS/g" template.yaml + + # Get IP CIDR range for EC2 Instance Connect + cd /home/ec2-user + mkdir -p ip_prefix + cd ip_prefix + git clone https://github.com/joetek/aws-ip-ranges-json.git + cd aws-ip-ranges-json + AWS_REGION=${aws_region} + EC2_CONNECT_IP=$(cat ip-ranges-ec2-instance-connect.json | jq -r --arg AWS_REGION "$AWS_REGION" '.prefixes[] | select(.region==$AWS_REGION).ip_prefix') + echo "export EC2_CONNECT_IP=$EC2_CONNECT_IP" >> /home/ec2-user/.bash_profile + SECURITY_GROUP=${security_group_id} + echo "export SECURITY_GROUP=$SECURITY_GROUP" >> /home/ec2-user/.bash_profile + aws ec2 authorize-security-group-ingress --region $AWS_REGION --group-id $SECURITY_GROUP --protocol tcp --port 22 --cidr $EC2_CONNECT_IP + + - security_group_id : !GetAtt KafkaClientInstanceSecurityGroup.GroupId + msk_cluster_arn : !GetAtt ServerlessMSKCluster.Arn + kafka_topic_for_lambda : !Ref KafkaTopicForLambda + msk_kafka_version: !Ref MSKKafkaVersion + apache_kafka_installer_location: !Ref ApacheKafkaInstallerLocation + serverless_land_github_location: !Ref ServerlessLandGithubLocation + aws_region: !Ref 'AWS::Region' + java_version: !Ref JavaVersion + vpcid: !Ref VPC + privatesubnetone: !Ref PrivateSubnetMSKOne + privatesubnettwo: !Ref PrivateSubnetMSKTwo + privatesubnetthree: !Ref PrivateSubnetMSKThree + securitygroup: !GetAtt MSKSecurityGroup.GroupId + 
glue_registry_name: !Ref GlueSchemaRegistryForMSKName + contact_schema_name: !Ref ContactSchemaName + + + + EC2InstanceEndpoint: + Type: AWS::EC2::InstanceConnectEndpoint + Properties: + PreserveClientIp: true + SecurityGroupIds: + - !GetAtt KafkaClientInstanceSecurityGroup.GroupId + SubnetId: !Ref PublicSubnetOne + + EC2Role: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Sid: '' + Effect: Allow + Principal: + Service: ec2.amazonaws.com + Action: 'sts:AssumeRole' + Path: "/" + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonMSKFullAccess + - arn:aws:iam::aws:policy/AWSCloudFormationFullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAFullAccess + - arn:aws:iam::aws:policy/IAMFullAccess + - arn:aws:iam::aws:policy/AWSLambda_FullAccess + - arn:aws:iam::aws:policy/AmazonSQSFullAccess + Policies: + - PolicyName: MSKConfigurationAccess + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "kafka:CreateConfiguration", + "Resource": "*" + } + ] + }' + - PolicyName: CloudformationDeploy + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:*" + ], + "Resource": "*" + } + ] + }' + - PolicyName: MSKProducerPermissions + PolicyDocument: + Version: 2012-10-17 + Statement: + - Sid: SecretsAccess + Effect: Allow + Action: + - 'secretsmanager:*' + - 'kms:*' + Resource: '*' + - Sid: SQSPermissions + Effect: Allow + Action: + - 'sqs:CreateQueue' + - 'sqs:DeleteQueue' + - 'sqs:SetQueueAttributes' + - 'sqs:GetQueueAttributes' + - 'sqs:GetQueueUrl' + - 'sqs:TagQueue' + - 'sqs:ListQueues' + - 'sqs:ListQueueTags' + Resource: '*' + - Sid: GlueAndIAMPermissions + Effect: Allow + Action: + - 'glue:*Schema*' + - 'iam:CreatePolicy' + - 'iam:Tag*' + - 'iam:AttachRolePolicy' + Resource: '*' + - PolicyName: MSKConnectAuthentication + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic*", + "kafka-cluster:Connect", + "kafka-cluster:AlterCluster", + "kafka-cluster:DescribeCluster", + "kafka-cluster:DescribeClusterDynamicConfiguration" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:cluster/${AWS::StackName}-cluster/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:*Topic*", + "kafka-cluster:WriteData", + "kafka-cluster:ReadData" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:topic/${AWS::StackName}-cluster/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kafka-cluster:AlterGroup", + "kafka-cluster:DescribeGroup" + ], + "Resource": [ + "arn:aws:kafka:${AWS::Region}:${AWS::AccountId}:group/${AWS::StackName}-cluster/*" + ] + } + ] + }' + - PolicyName: SecurityGroupsPolicy + PolicyDocument: !Sub '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroupRules", + "ec2:DescribeTags" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:RevokeSecurityGroupEgress", + "ec2:ModifySecurityGroupRules", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress" + 
], + "Resource": [ + "arn:aws:ec2:${AWS::Region}:${AWS::AccountId}:security-group/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:ModifySecurityGroupRules" + ], + "Resource": [ + "arn:aws:ec2:${AWS::Region}:${AWS::AccountId}:security-group-rule/*" + ] + } + ] + }' + + EC2InstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + InstanceProfileName: !Join + - '-' + - - 'EC2MMMSKCFProfile' + - !Ref 'AWS::StackName' + Roles: + - !Ref EC2Role + + + MSKCertAuthority: + Type: AWS::ACMPCA::CertificateAuthority + Condition: CreateProvisionedCluster + Properties: + KeyAlgorithm: "RSA_4096" + SigningAlgorithm: "SHA256WITHRSA" + Subject: + Country: "US" + Type: "ROOT" + + MSKCert: + Type: AWS::ACMPCA::Certificate + Condition: CreateProvisionedCluster + Properties: + CertificateAuthorityArn: !Ref MSKCertAuthority + CertificateSigningRequest: !GetAtt + - MSKCertAuthority + - CertificateSigningRequest + SigningAlgorithm: "SHA256WITHRSA" + TemplateArn: arn:aws:acm-pca:::template/RootCACertificate/V1 + Validity: + Type: YEARS + Value: 10 + + RootCAActivation: + Type: AWS::ACMPCA::CertificateAuthorityActivation + Condition: CreateProvisionedCluster + Properties: + CertificateAuthorityArn: + Ref: MSKCertAuthority + Certificate: + Fn::GetAtt: + - MSKCert + - Certificate + Status: ACTIVE + + RootCAPermission: + Type: AWS::ACMPCA::Permission + Condition: CreateProvisionedCluster + Properties: + Actions: + - IssueCertificate + - GetCertificate + - ListPermissions + CertificateAuthorityArn: !Ref MSKCertAuthority + Principal: acm.amazonaws.com + + CredentialsKMSKey: + Type: AWS::KMS::Key + Condition: CreateProvisionedCluster + Properties: + Description: "KMS key to use with credentials secret with KMS" + EnableKeyRotation: True + KeyPolicy: + Version: "2012-10-17" + Id: key-default-1 + Statement: + - Sid: Enable IAM User Permissions + Effect: Allow + Principal: + AWS: !Join + - '' + - - 'arn:aws:iam::' + - !Ref 'AWS::AccountId' + - ':root' + Action: 'kms:*' + Resource: '*' + - Sid: Enable Secret Manager Permissions + Effect: Allow + Principal: + AWS: "*" + Action: + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: '*' + Condition: + StringEquals: + kms:CallerAccount: !Ref 'AWS::AccountId' + kms:ViaService: !Join + - '' + - - 'secretsmanager.' 
+ - !Ref 'AWS::Region' + - '.amazonaws.com' + PendingWindowInDays: 7 + + CredentialsKMSKeyAlias: + Type: AWS::KMS::Alias + Condition: CreateProvisionedCluster + Properties: + AliasName: alias/mskstack_secret_manager_key + TargetKeyId: !Ref 'CredentialsKMSKey' + + CredentialsSecret: + Type: AWS::SecretsManager::Secret + Condition: CreateProvisionedCluster + Properties: + Description: "Secret to use for SCRAM Auth" + Name: "AmazonMSK_Credentials" + GenerateSecretString: + SecretStringTemplate: '{"username": "test-user"}' + GenerateStringKey: "password" + PasswordLength: 30 + ExcludeCharacters: '"@/\' + KmsKeyId: !Ref 'CredentialsKMSKey' + + MSKConfiguration: + Type: AWS::MSK::Configuration + Condition: CreateProvisionedCluster + Properties: + Description: "MSKConfiguration" + Name: "MSKConfiguration" + ServerProperties: | + auto.create.topics.enable=true + default.replication.factor=3 + min.insync.replicas=2 + num.io.threads=8 + num.network.threads=5 + num.partitions=1 + num.replica.fetchers=2 + replica.lag.time.max.ms=30000 + socket.receive.buffer.bytes=102400 + socket.request.max.bytes=104857600 + socket.send.buffer.bytes=102400 + unclean.leader.election.enable=true + zookeeper.session.timeout.ms=18000 + delete.topic.enable=true + log.retention.hours=8 + + MSKCluster: + Type: AWS::MSK::Cluster + Condition: CreateProvisionedCluster + Properties: + BrokerNodeGroupInfo: + ClientSubnets: + - !Ref PrivateSubnetMSKOne + - !Ref PrivateSubnetMSKTwo + - !Ref PrivateSubnetMSKThree + SecurityGroups: + - !GetAtt MSKSecurityGroup.GroupId + InstanceType: "kafka.m5.large" + StorageInfo: + EBSStorageInfo: + VolumeSize: 100 + ClientAuthentication: + Unauthenticated: + Enabled: False + Sasl: + Iam: + Enabled: True + Scram: + Enabled: True + Tls: + CertificateAuthorityArnList: + - !Ref MSKCertAuthority + Enabled: True + ClusterName: !Sub "${AWS::StackName}-cluster" + ConfigurationInfo: + Arn: !Ref MSKConfiguration + Revision: 1 + EncryptionInfo: + EncryptionInTransit: + ClientBroker: TLS + InCluster: True + KafkaVersion: !Ref MSKKafkaVersion + NumberOfBrokerNodes: 3 + + SecretMSKAssociation: + Type: AWS::MSK::BatchScramSecret + Condition: CreateProvisionedCluster + Properties: + ClusterArn: !Ref MSKCluster + SecretArnList: + - !Ref CredentialsSecret + + ServerlessMSKCluster: + Type: AWS::MSK::ServerlessCluster + Condition: CreateServerlessCluster + Properties: + ClientAuthentication: + Sasl: + Iam: + Enabled: True + ClusterName: !Sub "${AWS::StackName}-cluster" + VpcConfigs: + - SubnetIds: + - !Ref PrivateSubnetMSKOne + - !Ref PrivateSubnetMSKTwo + - !Ref PrivateSubnetMSKThree + SecurityGroups: + - !GetAtt MSKSecurityGroup.GroupId + + + + +Outputs: + VPCId: + Description: The ID of the VPC created + Value: !Ref 'VPC' + Export: + Name: !Sub "${AWS::StackName}-VPCID" + PublicSubnetOne: + Description: The name of the public subnet created + Value: !Ref 'PublicSubnetOne' + Export: + Name: !Sub "${AWS::StackName}-PublicSubnetOne" + PrivateSubnetMSKOne: + Description: The ID of private subnet one created + Value: !Ref 'PrivateSubnetMSKOne' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKOne" + PrivateSubnetMSKTwo: + Description: The ID of private subnet two created + Value: !Ref 'PrivateSubnetMSKTwo' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKTwo" + PrivateSubnetMSKThree: + Description: The ID of private subnet three created + Value: !Ref 'PrivateSubnetMSKThree' + Export: + Name: !Sub "${AWS::StackName}-PrivateSubnetMSKThree" + VPCStackName: + Description: The name of the VPC Stack + 
Value: !Ref 'AWS::StackName' + Export: + Name: !Sub "${AWS::StackName}-VPCStackName" + MSKArn: + Description: Provisioned MSK Cluster ARN. + Value: !Ref MSKCluster + Export: + Name: !Sub "${AWS::StackName}-MSKArn" + Condition: "CreateProvisionedCluster" + CredentialsSecretArn: + Description: ARN of the Secrets Manager secret with credentials. + Value: !Ref CredentialsSecret + Export: + Name: !Sub "${AWS::StackName}-CredentialsSecret" + Condition: "CreateProvisionedCluster" + ServerlessMSKArn: + Description: Serverless MSK Cluster ARN. + Value: !Ref ServerlessMSKCluster + Export: + Name: !Sub "${AWS::StackName}-Serverless" + Condition: "CreateServerlessCluster" + SecurityGroupId: + Description: ID of security group for MSK clients. + Value: !GetAtt MSKSecurityGroup.GroupId + Export: + Name: !Sub "${AWS::StackName}-SecurityGroupId" + EC2InstanceEndpointID: + Description: The ID of the EC2 Instance Endpoint + Value: !Ref EC2InstanceEndpoint + KafkaTopicForLambda: + Description: The Kafka topic to use for the Java Lambda functions + Value: !Ref KafkaTopicForLambda + Export: + Name: !Sub "${AWS::StackName}-KafkaTopicForLambda" + ContactSchemaArn: + Description: ARN of the Contact schema + Value: !Ref ContactSchema + Export: + Name: !Sub "${AWS::StackName}-ContactSchemaArn" \ No newline at end of file diff --git a/msk-lambda-schema-avro-java-sam/README.md b/msk-lambda-schema-avro-java-sam/README.md new file mode 100644 index 000000000..de7cca157 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/README.md @@ -0,0 +1,213 @@ +# msk-lambda-schema-avro-java-sam +# Java AWS Lambda Kafka consumer and AVRO producer with AWS Glue Schema Registry, using AWS SAM +
+This pattern is an example of Lambda functions that: +1. Consume messages from an Amazon Managed Streaming for Apache Kafka (Amazon MSK) topic +2. Produce AVRO-formatted messages to an Amazon MSK topic using Schema Registry +
+Both functions use IAM authentication to connect to the MSK cluster and leverage AWS Glue Schema Registry for AVRO schema management. +
+This project contains source code and supporting files for a serverless application that you can deploy with the SAM CLI. It includes the following files and folders. +
+- kafka_event_consumer_function/src/main/java - Code for the consumer Lambda function. +- kafka_event_producer_function/src/main/java - Code for the AVRO producer Lambda function. +- events - Invocation events that you can use to invoke the functions. +- kafka_event_consumer_function/src/test/java - Unit tests for the consumer code. +- template.yaml - A template that defines the application's Lambda functions. +- template_original.yaml - The original template with placeholders that get replaced during deployment. +- MSKAndKafkaClientEC2.yaml - A CloudFormation template that deploys an MSK cluster and an EC2 client machine with all prerequisites already installed, so you can build, deploy, and test the Lambda functions directly. +
+Important: this application uses various AWS services and there are costs associated with these services after the Free Tier usage - please see the [AWS Pricing page](https://aws.amazon.com/pricing/) for details. You are responsible for any AWS costs incurred. No warranty is implied in this example. +
+## Requirements +
+* [Create an AWS account](https://portal.aws.amazon.com/gp/aws/developer/registration/index.html) if you do not already have one and log in.
The IAM user that you use must have sufficient permissions to make necessary AWS service calls and manage AWS resources. +
+## Run the CloudFormation template to create the MSK Cluster and Client EC2 machine +
+* [Run the CloudFormation template using the file MSKAndKafkaClientEC2.yaml] - Go to the AWS CloudFormation console and create a new stack by specifying the template file. You can keep the defaults for the input parameters or modify them as necessary. Wait for the CloudFormation stack to be created. The template creates an MSK cluster (Provisioned or Serverless, based on your selection) and an EC2 machine that you can use as a client. +
+* [Connect to the EC2 machine] - Once the CloudFormation stack is created, go to the EC2 console and log into the machine using either the "Connect using EC2 Instance Connect" or the "Connect using EC2 Instance Connect Endpoint" option under the "EC2 Instance Connect" tab. +Note: You may need to wait for some time after the CloudFormation stack reaches CREATE_COMPLETE, as some UserData scripts continue running after the stack reports that status. +
+* [Check that the Kafka topic has been created] - Once you are inside the EC2 machine, you should be in the /home/ec2-user folder. Check the contents of the file kafka_topic_creator_output.txt by running cat kafka_topic_creator_output.txt. You should see output such as "Created topic MskIamJavaLambdaTopic." +
+If you cannot find the file kafka_topic_creator_output.txt, or if it is blank or shows an error message, run ./kafka_topic_creator.sh. This script creates the Kafka topic that the consumer Lambda function subscribes to. +
+## Prerequisites to Deploy the sample Lambda function +
+The EC2 machine created by the CloudFormation template has all the software needed to deploy the Lambda functions. +
+The AWS SAM CLI is a serverless tool for building and testing Lambda applications. It uses Docker to locally test your functions in an Amazon Linux environment that resembles the Lambda execution environment. It can also emulate your application's build environment and API. +
+* Java - The EC2 machine has the Amazon Corretto JDK of the version that you selected in the input parameters of the CloudFormation template. At the time of publishing this pattern, AWS SAM supports only Java versions 11, 17 and 21 +* Maven - On the EC2 machine, we have installed Maven (https://maven.apache.org/install.html) +* AWS SAM CLI - We have installed the AWS SAM CLI (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +* Docker - We have installed the Docker Community Edition on the EC2 machine (https://hub.docker.com/search/?type=edition&offering=community) +
+We have also cloned the GitHub repository for serverless-patterns on the EC2 machine by running the command below ``` git clone https://github.com/aws-samples/serverless-patterns.git ``` Change directory to the pattern directory: ``` cd serverless-patterns/msk-lambda-schema-avro-java-sam ``` +
+## Build the application +
+Build your application with the `sam build` command.
+ +```bash +sam build +``` +
+The SAM CLI installs the dependencies defined in `kafka_event_consumer_function/pom.xml`, creates a deployment package, and saves it in the `.aws-sam/build` folder. +
+## Deploy the sample application +
+To deploy your application for the first time, run the following in your shell: +
+```bash +sam deploy --capabilities CAPABILITY_IAM --no-confirm-changeset --no-disable-rollback --region $AWS_REGION --stack-name msk-lambda-schema-avro-java-sam --guided +``` +
+The sam deploy command will package and deploy your application to AWS, with a series of prompts: +
+* **Stack Name**: The name of the stack to deploy to CloudFormation. This should be unique to your account and region, and a good starting point would be something matching your project name. +* **AWS Region**: The AWS region you want to deploy your app to. +* **Parameter MSKClusterName**: The name of the MSK Cluster. This will be `<stack-name>-cluster`, where `<stack-name>` is the name of the CloudFormation stack you deployed in the previous step. +* **Parameter MSKClusterId**: The unique ID of the MSK Cluster. This can be found in the MSK console or extracted from the MSK ARN in the CloudFormation outputs. +* **Parameter MSKTopic**: The Kafka topic on which the Lambda functions will produce and consume messages. You can find this in the CloudFormation outputs as `KafkaTopicForLambda`. +* **Parameter ContactSchemaName**: The name of the schema to be used for the AVRO serialization (default: ContactSchema). +* **Parameter VpcId**: The ID of the VPC where the MSK cluster is deployed. You can find this in the CloudFormation outputs as `VPCId`. +* **Parameter SubnetIds**: Comma-separated list of subnet IDs where the MSK cluster is deployed. You can find these in the CloudFormation outputs as `PrivateSubnetMSKOne`, `PrivateSubnetMSKTwo`, and `PrivateSubnetMSKThree`. +* **Parameter SecurityGroupIds**: Comma-separated list of security group IDs that allow access to the MSK cluster. You can find this in the CloudFormation outputs as `SecurityGroupId`. +* **Confirm changes before deploy**: If set to yes, any change sets will be shown to you before execution for manual review. +* **Allow SAM CLI IAM role creation**: Many AWS SAM templates, including this example, create AWS IAM roles required for the AWS Lambda function(s) included to access AWS services. By default, these are scoped down to minimum required permissions. +* **Disable rollback**: Defaults to No; it preserves the state of previously provisioned resources when an operation fails. +* **Save arguments to configuration file**: If set to yes, your choices will be saved to a configuration file inside the project. +* **SAM configuration file [samconfig.toml]**: Name of the configuration file to store configuration information locally. +* **SAM configuration environment [default]**: Environment for storing deployment information locally. +
+You should get the message "Successfully created/updated stack - <stack-name> in <region>" if all goes well. +
+**Note: You must retrieve the required parameters from the CloudFormation outputs in the AWS Console after deploying the MSKAndKafkaClientEC2.yaml template. These outputs contain all the necessary information for deploying the Lambda functions.** +
+
+## Test the sample application +
+Once the Lambda functions are deployed, you can test the application by invoking the producer Lambda function, which generates AVRO-formatted messages to the MSK topic. The consumer Lambda function then automatically processes these messages.
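+
+Under the hood, the producer function serializes a `Contact` record with AVRO and registers (or looks up) the schema in AWS Glue Schema Registry before writing to the MSK topic. The actual implementation lives in `kafka_event_producer_function/src/main/java`; the sketch below is only a minimal, hypothetical illustration of that flow using the AWS Glue Schema Registry serde and the MSK IAM auth callback handler. The registry name, schema name, topic, and field values are taken from this pattern's defaults, and the broker list is assumed to come from an environment variable — check the repo's code for how it is actually wired up.
+
+```java
+// Minimal sketch, not the repo's actual producer code.
+import java.util.Properties;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import com.amazonaws.services.schemaregistry.serializers.GlueSchemaRegistryKafkaSerializer;
+import com.amazonaws.services.schemaregistry.utils.AWSSchemaRegistryConstants;
+
+public class AvroContactProducerSketch {
+
+    // Mirrors the Contact schema registered by MSKAndKafkaClientEC2.yaml (all fields are strings)
+    private static final String SCHEMA_JSON = "{\"type\":\"record\",\"name\":\"Contact\",\"fields\":["
+            + "{\"name\":\"firstname\",\"type\":\"string\"},{\"name\":\"lastname\",\"type\":\"string\"},"
+            + "{\"name\":\"company\",\"type\":\"string\"},{\"name\":\"street\",\"type\":\"string\"},"
+            + "{\"name\":\"city\",\"type\":\"string\"},{\"name\":\"county\",\"type\":\"string\"},"
+            + "{\"name\":\"state\",\"type\":\"string\"},{\"name\":\"zip\",\"type\":\"string\"},"
+            + "{\"name\":\"homePhone\",\"type\":\"string\"},{\"name\":\"cellPhone\",\"type\":\"string\"},"
+            + "{\"name\":\"email\",\"type\":\"string\"},{\"name\":\"website\",\"type\":\"string\"}]}";
+
+    public static void main(String[] args) {
+        Properties props = new Properties();
+        // Assumption: the IAM bootstrap broker string is exported into the environment
+        // (kafka_topic_creator.sh writes BOOTSTRAP_BROKERS_IAM to .bash_profile on the EC2 client)
+        props.put("bootstrap.servers", System.getenv("BOOTSTRAP_BROKERS_IAM"));
+        // IAM auth settings, mirroring the client.properties written by the template's UserData
+        props.put("security.protocol", "SASL_SSL");
+        props.put("sasl.mechanism", "AWS_MSK_IAM");
+        props.put("sasl.jaas.config", "software.amazon.msk.auth.iam.IAMLoginModule required;");
+        props.put("sasl.client.callback.handler.class", "software.amazon.msk.auth.iam.IAMClientCallbackHandler");
+        // Glue Schema Registry serializer configuration (registry/schema names are the template defaults)
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", GlueSchemaRegistryKafkaSerializer.class.getName());
+        props.put(AWSSchemaRegistryConstants.AWS_REGION, System.getenv("AWS_REGION"));
+        props.put(AWSSchemaRegistryConstants.REGISTRY_NAME, "GlueSchemaRegistryForMSK");
+        props.put(AWSSchemaRegistryConstants.SCHEMA_NAME, "ContactSchema");
+        props.put(AWSSchemaRegistryConstants.DATA_FORMAT, "AVRO");
+
+        // Build one Contact record (values borrowed from events/avro_producer_event.json)
+        GenericRecord contact = new GenericData.Record(new Schema.Parser().parse(SCHEMA_JSON));
+        contact.put("firstname", "John");
+        contact.put("lastname", "Doe");
+        contact.put("company", "Example Corp");
+        contact.put("street", "123 Main St");
+        contact.put("city", "Seattle");
+        contact.put("county", "King");
+        contact.put("state", "WA");
+        contact.put("zip", "98101");
+        contact.put("homePhone", "555-123-4567");
+        contact.put("cellPhone", "555-987-6543");
+        contact.put("email", "john.doe@example.com");
+        contact.put("website", "https://www.johndoe.com");
+
+        try (KafkaProducer<String, GenericRecord> producer = new KafkaProducer<>(props)) {
+            producer.send(new ProducerRecord<>("MskIamJavaLambdaTopic", "contact-1", contact));
+        }
+    }
+}
+```
+
+The serializer and callback handler above come from the `software.amazon.glue:schema-registry-serde` and `software.amazon.msk:aws-msk-iam-auth` libraries; check the function's `pom.xml` for the exact dependencies and versions this pattern actually uses.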
+ +### Option 1: Invoke the producer Lambda function using AWS CLI +
+You can invoke the producer Lambda function using the AWS CLI with the following command: +
+```bash +aws lambda invoke \ + --function-name msk-lambda-schema-avro-java-sam-LambdaMSKProducerJavaFunction-XXXXXXXXXXXX \ + --payload '{"message": "Test message using AVRO and Schema Registry"}' \ + --cli-binary-format raw-in-base64-out \ + response.json +``` +
+You can find the exact function name in the AWS Lambda console or by running: +
+```bash +aws lambda list-functions --query "Functions[?contains(FunctionName, 'Producer')].FunctionName" +``` +
+### Option 2: Invoke the producer Lambda function using AWS Console +
+1. Open the [AWS Lambda Console](https://console.aws.amazon.com/lambda) +2. Find and select your producer Lambda function (it will be named something like `msk-lambda-schema-avro-java-sam-LambdaMSKProducerJavaFunction-XXXXXXXXXXXX`) +3. Click on the "Test" tab +4. Create a new test event with the following JSON payload: + ```json + { + "message": "Test message using AVRO and Schema Registry" + } + ``` +5. Click "Test" to invoke the function +
+### Verify the results +
+After invoking the producer function, check the CloudWatch logs for both Lambda functions: +
+1. Open the [CloudWatch Logs Console](https://console.aws.amazon.com/cloudwatch/home#logs:) +2. Find the log groups for both your producer and consumer Lambda functions: + - Producer log group: `/aws/lambda/msk-lambda-schema-avro-java-sam-LambdaMSKProducerJavaFunction-XXXXXXXXXXXX` + - Consumer log group: `/aws/lambda/msk-lambda-schema-avro-java-sam-LambdaMSKConsumerJavaFunction-XXXXXXXXXXXX` +
+ You can search for these log groups by typing "msk-lambda-schema-avro-java-sam" in the filter box. +
+3. Click on each log group and then select the most recent log stream (typically named with a timestamp and UUID) +4. In the producer logs, look for entries showing: + - Successful serialization of the message using AVRO format + - Successful registration or retrieval of the schema from Schema Registry + - Confirmation that the message was sent to the MSK topic +
+5. In the consumer logs, look for entries showing: + - Receipt of the message batch from the MSK topic + - Successful deserialization of the AVRO message + - The decoded message content and any processing performed on it +
+The consumer Lambda function will automatically process messages from the MSK topic. It parses the Kafka messages and outputs the fields in the Kafka messages to CloudWatch logs. +
+The event payload groups records by topic-partition; each key has a list of messages. Each Kafka message has the following properties - Topic, Partition, Offset, TimeStamp, TimeStampType, Key and Value. +
+The Key and Value are base64 encoded and have to be decoded. A message can also have a list of headers, each header having a key and a value. +
+The code in this example prints out the fields in the Kafka message, decodes the key and the value, and logs them in CloudWatch logs.
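+
+As a concrete illustration of the decoding described above, here is a minimal, hypothetical handler sketch. It is not the repo's actual consumer (which additionally deserializes the AVRO payload via the Glue Schema Registry deserializer); it only shows the base64 decode step and the per-record fields listed above, using the `KafkaEvent` type from the `aws-lambda-java-events` library:
+
+```java
+// Illustrative sketch only - the real consumer code lives in kafka_event_consumer_function/src/main/java.
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
+import java.util.List;
+import java.util.Map;
+import com.amazonaws.services.lambda.runtime.Context;
+import com.amazonaws.services.lambda.runtime.RequestHandler;
+import com.amazonaws.services.lambda.runtime.events.KafkaEvent;
+
+public class DecodeSketchHandler implements RequestHandler<KafkaEvent, Void> {
+    @Override
+    public Void handleRequest(KafkaEvent event, Context context) {
+        // getRecords() maps each "topic-partition" key to its list of messages
+        for (Map.Entry<String, List<KafkaEvent.KafkaEventRecord>> entry : event.getRecords().entrySet()) {
+            for (KafkaEvent.KafkaEventRecord rec : entry.getValue()) {
+                // Key and Value arrive base64-encoded and must be decoded first
+                String key = rec.getKey() == null ? null
+                        : new String(Base64.getDecoder().decode(rec.getKey()), StandardCharsets.UTF_8);
+                byte[] avroBytes = Base64.getDecoder().decode(rec.getValue());
+                context.getLogger().log(String.format(
+                        "topic=%s partition=%d offset=%d timestamp=%d timestampType=%s key=%s valueBytes=%d",
+                        rec.getTopic(), rec.getPartition(), rec.getOffset(),
+                        rec.getTimestamp(), rec.getTimestampType(), key, avroBytes.length));
+                // The real consumer would hand avroBytes to the Glue Schema Registry AVRO deserializer here
+            }
+        }
+        return null;
+    }
+}
+```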
+
+### Message Filtering with Event Source Mapping +
+This sample application demonstrates how to use event source mapping filters with Amazon MSK and Lambda. The producer Lambda function generates contacts with zip codes that start with either "1000" or "2000" (with approximately 50% probability for each). However, the consumer Lambda function is configured to only process messages where the zip code starts with "1000". +
+#### Filter Configuration +
+The filter is configured in the SAM template using the `FilterCriteria` property of the MSK event source mapping: +
+```yaml +FilterCriteria: + Filters: + - Pattern: '{ "value": { "zip": [ { "prefix": "1000" } ] }}' +``` +
+This filter pattern instructs the event source mapping to only send messages to the Lambda function if the message value contains a "zip" field that starts with "1000". +
+#### Verifying the Filter Behavior +
+To verify that the filter is working correctly, follow these steps: +
+1. **Invoke the producer Lambda function** using one of the methods described above. +
+2. **Check the producer function logs** in CloudWatch: + - Navigate to the CloudWatch Logs console + - Find the log group for the producer function (`/aws/lambda/msk-lambda-schema-avro-java-sam-LambdaMSKProducerJavaFunction-XXXXXXXXXXXX`) + - Open the most recent log stream + - Look for the "ZIP CODE DISTRIBUTION SUMMARY" section, which shows how many messages were generated with zip codes starting with "1000" and how many with "2000" + - You should see that the producer generated a mix of both zip code types +
+3. **Check the consumer function logs** in CloudWatch: + - Navigate to the CloudWatch Logs console + - Find the log group for the consumer function (`/aws/lambda/msk-lambda-schema-avro-java-sam-LambdaMSKConsumerJavaFunction-XXXXXXXXXXXX`) + - Open the most recent log stream + - You should see that the consumer only processed messages with zip codes starting with "1000" + - Messages with zip codes starting with "2000" were filtered out by the event source mapping and never reached the Lambda function +
+This demonstrates how event source mapping filters can be used to efficiently process only the messages that match specific criteria, reducing Lambda invocation costs and processing overhead. +
+## Cleanup +
+First, clean up the Lambda functions by running the sam delete command: +
+``` +cd /home/ec2-user/serverless-patterns/msk-lambda-schema-avro-java-sam +sam delete + +``` +Confirm by pressing y for both questions. You should see the Lambda functions being deleted and a final confirmation "Deleted successfully" on the command line. +
+Next, delete the CloudFormation stack that created the MSK cluster and the EC2 machine: go to the CloudFormation console, select the stack, and choose "Delete". Deletion runs for some time, but eventually you should see the stack cleaned up. If you get an error message saying the stack could not be deleted, retry and do a force delete. This can happen because ENIs created by the deployed Lambda functions in your VPC may prevent the VPC from being deleted even after the functions themselves are deleted.
\ No newline at end of file diff --git a/msk-lambda-schema-avro-java-sam/events/avro_producer_event.json b/msk-lambda-schema-avro-java-sam/events/avro_producer_event.json new file mode 100644 index 000000000..060282ed3 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/events/avro_producer_event.json @@ -0,0 +1,14 @@ +{ + "firstname": "John", + "lastname": "Doe", + "company": "Example Corp", + "street": "123 Main St", + "city": "Seattle", + "county": "King", + "state": "WA", + "zip": "98101", + "homePhone": "555-123-4567", + "cellPhone": "555-987-6543", + "email": "john.doe@example.com", + "website": "https://www.johndoe.com" +} diff --git a/msk-lambda-schema-avro-java-sam/events/event.json b/msk-lambda-schema-avro-java-sam/events/event.json new file mode 100644 index 000000000..b527986ab --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/events/event.json @@ -0,0 +1,31 @@ +{ + "records":{ + "myTopic-0":[ + { + "topic":"myTopic", + "partition":0, + "offset":250, + "timestamp":1678072110111, + "timestampType":"CREATE_TIME", + "value":"Zg==", + "headers":[ + + ] + }, + { + "topic":"myTopic", + "partition":0, + "offset":251, + "timestamp":1678072111086, + "timestampType":"CREATE_TIME", + "value":"Zw==", + "headers":[ + + ] + } + ] + }, + "eventSource":"aws:kafka", + "eventSourceArn":"arn:aws:kafka:us-west-2:123456789012:cluster/MSKWorkshopCluster/a93759a9-c9d0-4952-984c-492c6bfa2be8-13", + "bootstrapServers":"b-2.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098,b-3.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098,b-1.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098" +} \ No newline at end of file diff --git a/msk-lambda-schema-avro-java-sam/example-pattern.json b/msk-lambda-schema-avro-java-sam/example-pattern.json new file mode 100644 index 000000000..2c316f3fd --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/example-pattern.json @@ -0,0 +1,99 @@ +{ + "title": "AWS Lambda functions for MSK with AVRO and Schema Registry (Java)", + "description": "Creates Lambda functions that consume from and produce AVRO-formatted messages to Amazon MSK topics using AWS Glue Schema Registry and IAM authentication.", + "language": "Java", + "level": "300", + "framework": "AWS SAM", + "introBox": { + "headline": "How it works", + "text": [ + "This pattern provides two Lambda functions: a consumer that processes messages from an MSK topic, and a producer that generates AVRO-formatted messages using AWS Glue Schema Registry.", + "The CloudFormation template provided in this pattern deploys an MSK cluster and creates a Kafka topic.", + "This pattern demonstrates how to use AVRO serialization with AWS Glue Schema Registry for efficient message handling in a Kafka-based architecture.", + "The pattern works with either a Provisioned or Serverless MSK cluster as long as the cluster is configured to use IAM authentication.", + "For detailed deployment instructions see the README.md" + ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/serverless-patterns/tree/main/msk-lambda-schema-avro-java-sam", + "templateURL": "serverless-patterns/msk-lambda-schema-avro-java-sam", + "projectFolder": "msk-lambda-schema-avro-java-sam", + "templateFile": "template.yaml" + } + }, + "resources": { + "bullets": [ + { + "text": "Amazon MSK cluster pattern", + "link": "https://serverlessland.com/patterns/msk-cfn-sasl-lambda" + }, + { + "text": "Using AWS Lambda with Amazon MSK", + "link": "https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html" + }, + {
+ "text": "AWS Glue Schema Registry", + "link": "https://docs.aws.amazon.com/glue/latest/dg/schema-registry.html" + }, + { + "text": "Using the AWS Glue Schema Registry with Amazon MSK", + "link": "https://docs.aws.amazon.com/msk/latest/developerguide/integrations-schema-registry.html" + }, + { + "text": "AWS CloudFormation Provisioned MSK cluster reference", + "link": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html" + }, + { + "text": "AWS CloudFormation Serverless MSK cluster reference", + "link": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-serverlesscluster.html" + } + ] + }, + "deploy": { + "text": [ + "sam deploy --guided" + ] + }, + "testing": { + "text": [ + "Invoke the producer Lambda function to generate AVRO-formatted messages to the MSK topic.", + "The consumer Lambda function will automatically process these messages.", + "See the GitHub repo for detailed testing instructions." + ] + }, + "cleanup": { + "text": [ + "Delete the Lambda functions: sam delete.", + "Delete the CloudFormation stack that created the MSK cluster and EC2 client machine." + ] + }, + "authors": [ + { + "name": "Indranil Banerjee", + "bio": "AWS - Senior Solutions Architect", + "linkedin": "https://www.linkedin.com/in/indranil-banerjee-b00a261/" + }, + { + "name": "Vaibhav Jain", + "bio": "AWS - Sr. Application Architect", + "linkedin": "https://www.linkedin.com/in/vaibhavjainv/" + }, + { + "name": "Adam Wagner", + "bio": "AWS - Principal Serverless Solutions Architect", + "linkedin": "https://www.linkedin.com/in/adam-wagner-4bb412/" + }, + { + "name": "Philipp Page", + "bio": "AWS - SA Engineer", + "linkedin": "https://www.linkedin.com/in/philipp-page/" + }, + { + "name": "Leandro Cavalcante Damascena", + "bio": "AWS - Sr. 
SA Engineer", + "linkedin": "https://www.linkedin.com/in/leandrodamascena/" + } + ] + } + \ No newline at end of file diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.classpath b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.classpath new file mode 100644 index 000000000..ff370dd09 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.classpath @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.gitignore b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.gitignore new file mode 100644 index 000000000..b83d22266 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.gitignore @@ -0,0 +1 @@ +/target/ diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.project b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.project new file mode 100644 index 000000000..b0e744878 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.project @@ -0,0 +1,23 @@ + + + kafka_event_consumer_function + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.m2e.core.maven2Builder + + + + + + org.eclipse.jdt.core.javanature + org.eclipse.m2e.core.maven2Nature + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.core.resources.prefs b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.core.resources.prefs new file mode 100644 index 000000000..839d647ee --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.core.resources.prefs @@ -0,0 +1,5 @@ +eclipse.preferences.version=1 +encoding//src/main/java=UTF-8 +encoding//src/main/resources=UTF-8 +encoding//src/test/java=UTF-8 +encoding/=UTF-8 diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.jdt.apt.core.prefs b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.jdt.apt.core.prefs new file mode 100644 index 000000000..dfa4f3adb --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.jdt.apt.core.prefs @@ -0,0 +1,4 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.apt.aptEnabled=true +org.eclipse.jdt.apt.genSrcDir=target/generated-sources/annotations +org.eclipse.jdt.apt.genTestSrcDir=target/generated-test-sources/test-annotations diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.jdt.core.prefs b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 000000000..2985089d8 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,9 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.targetPlatform=11 +org.eclipse.jdt.core.compiler.compliance=11 +org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning +org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=ignore +org.eclipse.jdt.core.compiler.processAnnotations=enabled +org.eclipse.jdt.core.compiler.release=disabled +org.eclipse.jdt.core.compiler.source=11 diff --git 
a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.m2e.core.prefs b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.m2e.core.prefs new file mode 100644 index 000000000..f897a7f1c --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/.settings/org.eclipse.m2e.core.prefs @@ -0,0 +1,4 @@ +activeProfiles= +eclipse.preferences.version=1 +resolveWorkspaceProjects=true +version=1 diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/pom.xml b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/pom.xml new file mode 100644 index 000000000..620a4e21d --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/pom.xml @@ -0,0 +1,212 @@ + + + 4.0.0 + + com.amazonaws.services.lambda.samples.events.msk + kafka-event-consumer-function + 1.0.0 + jar + Kafka Event Consumer Function + + UTF-8 + 11 + 11 + + + + + com.amazonaws + aws-lambda-java-core + 1.2.2 + + + com.amazonaws + aws-lambda-java-events + 3.11.0 + + + com.google.code.gson + gson + 2.10.1 + + + org.apache.logging.log4j + log4j-api + 2.20.0 + + + org.apache.logging.log4j + log4j-core + 2.20.0 + + + org.apache.logging.log4j + log4j-slf4j18-impl + 2.18.0 + + + org.junit.jupiter + junit-jupiter-api + 5.10.0 + test + + + org.junit.jupiter + junit-jupiter-engine + 5.10.0 + test + + + + + software.amazon.awssdk + glue + 2.20.56 + + + + + org.apache.avro + avro + 1.11.1 + + + + software.amazon.lambda + powertools-kafka + 2.1.0 + + + + software.amazon.lambda + powertools-logging-log4j + 2.1.0 + + + + + org.apache.kafka + kafka-clients + 4.0.0 + + + + + org.aspectj + aspectjrt + 1.9.22 + + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + dev.aspectj + aspectj-maven-plugin + [1.14,) + + compile + + + + + true + + + + + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.4.1 + + false + + + + package + + shade + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.11.0 + + + org.apache.maven.plugins + maven-surefire-plugin + 3.1.2 + + + org.apache.avro + avro-maven-plugin + 1.11.3 + + + generate-sources + + schema + + + ${project.basedir}/src/main/avro/ + ${project.basedir}/src/main/java/ + String + + + + + + dev.aspectj + aspectj-maven-plugin + 1.14 + + 11 + 11 + 11 + + + software.amazon.lambda + powertools-logging + + + + + + org.aspectj + aspectjtools + + 1.9.22 + + + + + + compile + + + + + + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/avro/contact.avsc b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/avro/contact.avsc new file mode 100644 index 000000000..13a00de1e --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/avro/contact.avsc @@ -0,0 +1,19 @@ +{ + "type": "record", + "name": "Contact", + "namespace": "com.amazonaws.services.lambda.samples.events.msk", + "fields": [ + {"name": "firstname", "type": ["null", "string"], "default": null}, + {"name": "lastname", "type": ["null", "string"], "default": null}, + {"name": "company", "type": ["null", "string"], "default": null}, + {"name": "street", "type": ["null", "string"], "default": null}, + {"name": "city", "type": ["null", "string"], "default": null}, + {"name": "county", "type": ["null", "string"], "default": null}, + {"name": "state", "type": ["null", "string"], "default": null}, + {"name": "zip", "type": ["null", "string"], "default": null}, + {"name": "homePhone", "type": ["null", "string"], 
"default": null}, + {"name": "cellPhone", "type": ["null", "string"], "default": null}, + {"name": "email", "type": ["null", "string"], "default": null}, + {"name": "website", "type": ["null", "string"], "default": null} + ] +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroKafkaHandler.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroKafkaHandler.java new file mode 100644 index 000000000..6dce5c672 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroKafkaHandler.java @@ -0,0 +1,40 @@ +package com.amazonaws.services.lambda.samples.events.msk; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.lambda.powertools.kafka.Deserialization; +import software.amazon.lambda.powertools.kafka.DeserializationType; +import software.amazon.lambda.powertools.logging.Logging; + +public class AvroKafkaHandler implements RequestHandler, String> { + private static final Logger LOGGER = LoggerFactory.getLogger(AvroKafkaHandler.class); + + @Override + @Logging(logEvent = true) + @Deserialization(type = DeserializationType.KAFKA_AVRO) + public String handleRequest(ConsumerRecords records, Context context) { + LOGGER.info("=== AvroKafkaHandler called ==="); + LOGGER.info("Event object: {}", records); + LOGGER.info("Number of records: {}", records.count()); + + for (ConsumerRecord record : records) { + LOGGER.info("Processing record - Topic: {}, Partition: {}, Offset: {}", + record.topic(), record.partition(), record.offset()); + LOGGER.info("Record key: {}", record.key()); + LOGGER.info("Record value: {}", record.value()); + + if (record.value() != null) { + Contact contact = record.value(); + LOGGER.info("Contact details - firstName: {}, zip: {}", + contact.getFirstname(), contact.getZip()); + } + } + + LOGGER.info("=== AvroKafkaHandler completed ==="); + return "OK"; + } +} \ No newline at end of file diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/Contact.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/Contact.java new file mode 100644 index 000000000..20f712581 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/Contact.java @@ -0,0 +1,1389 @@ +/** + * Autogenerated by Avro + * + * DO NOT EDIT DIRECTLY + */ +package com.amazonaws.services.lambda.samples.events.msk; + +import org.apache.avro.specific.SpecificData; +import org.apache.avro.message.BinaryMessageEncoder; +import org.apache.avro.message.BinaryMessageDecoder; +import org.apache.avro.message.SchemaStore; + +@org.apache.avro.specific.AvroGenerated +public class Contact extends org.apache.avro.specific.SpecificRecordBase { + private static final long serialVersionUID = -4035028153225992319L; + + + public static final org.apache.avro.Schema SCHEMA$ = new 
org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Contact\",\"namespace\":\"com.amazonaws.services.lambda.samples.events.msk\",\"fields\":[{\"name\":\"firstname\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"lastname\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"company\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"street\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"city\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"county\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"state\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"zip\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"homePhone\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"cellPhone\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"email\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"website\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null}]}"); + public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } + + private static final SpecificData MODEL$ = new SpecificData(); + + private static final BinaryMessageEncoder ENCODER = + new BinaryMessageEncoder<>(MODEL$, SCHEMA$); + + private static final BinaryMessageDecoder DECODER = + new BinaryMessageDecoder<>(MODEL$, SCHEMA$); + + /** + * Return the BinaryMessageEncoder instance used by this class. + * @return the message encoder used by this class + */ + public static BinaryMessageEncoder getEncoder() { + return ENCODER; + } + + /** + * Return the BinaryMessageDecoder instance used by this class. + * @return the message decoder used by this class + */ + public static BinaryMessageDecoder getDecoder() { + return DECODER; + } + + /** + * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. + * @param resolver a {@link SchemaStore} used to find schemas by fingerprint + * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore + */ + public static BinaryMessageDecoder createDecoder(SchemaStore resolver) { + return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver); + } + + /** + * Serializes this Contact to a ByteBuffer. + * @return a buffer holding the serialized data for this instance + * @throws java.io.IOException if this instance could not be serialized + */ + public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { + return ENCODER.encode(this); + } + + /** + * Deserializes a Contact from a ByteBuffer. 
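+   * <p>For example (illustrative): {@code Contact contact = Contact.fromByteBuffer(buf);},
+   * where {@code buf} is assumed to hold bytes previously written by {@link #toByteBuffer()}.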
+ * @param b a byte buffer holding serialized data for an instance of this class + * @return a Contact instance decoded from the given buffer + * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class + */ + public static Contact fromByteBuffer( + java.nio.ByteBuffer b) throws java.io.IOException { + return DECODER.decode(b); + } + + private java.lang.String firstname; + private java.lang.String lastname; + private java.lang.String company; + private java.lang.String street; + private java.lang.String city; + private java.lang.String county; + private java.lang.String state; + private java.lang.String zip; + private java.lang.String homePhone; + private java.lang.String cellPhone; + private java.lang.String email; + private java.lang.String website; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public Contact() {} + + /** + * All-args constructor. + * @param firstname The new value for firstname + * @param lastname The new value for lastname + * @param company The new value for company + * @param street The new value for street + * @param city The new value for city + * @param county The new value for county + * @param state The new value for state + * @param zip The new value for zip + * @param homePhone The new value for homePhone + * @param cellPhone The new value for cellPhone + * @param email The new value for email + * @param website The new value for website + */ + public Contact(java.lang.String firstname, java.lang.String lastname, java.lang.String company, java.lang.String street, java.lang.String city, java.lang.String county, java.lang.String state, java.lang.String zip, java.lang.String homePhone, java.lang.String cellPhone, java.lang.String email, java.lang.String website) { + this.firstname = firstname; + this.lastname = lastname; + this.company = company; + this.street = street; + this.city = city; + this.county = county; + this.state = state; + this.zip = zip; + this.homePhone = homePhone; + this.cellPhone = cellPhone; + this.email = email; + this.website = website; + } + + @Override + public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } + + @Override + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + + // Used by DatumWriter. Applications should not call. + @Override + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return firstname; + case 1: return lastname; + case 2: return company; + case 3: return street; + case 4: return city; + case 5: return county; + case 6: return state; + case 7: return zip; + case 8: return homePhone; + case 9: return cellPhone; + case 10: return email; + case 11: return website; + default: throw new IndexOutOfBoundsException("Invalid index: " + field$); + } + } + + // Used by DatumReader. Applications should not call. + @Override + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: firstname = value$ != null ? value$.toString() : null; break; + case 1: lastname = value$ != null ? value$.toString() : null; break; + case 2: company = value$ != null ? value$.toString() : null; break; + case 3: street = value$ != null ? value$.toString() : null; break; + case 4: city = value$ != null ? value$.toString() : null; break; + case 5: county = value$ != null ? value$.toString() : null; break; + case 6: state = value$ != null ? 
value$.toString() : null; break; + case 7: zip = value$ != null ? value$.toString() : null; break; + case 8: homePhone = value$ != null ? value$.toString() : null; break; + case 9: cellPhone = value$ != null ? value$.toString() : null; break; + case 10: email = value$ != null ? value$.toString() : null; break; + case 11: website = value$ != null ? value$.toString() : null; break; + default: throw new IndexOutOfBoundsException("Invalid index: " + field$); + } + } + + /** + * Gets the value of the 'firstname' field. + * @return The value of the 'firstname' field. + */ + public java.lang.String getFirstname() { + return firstname; + } + + + /** + * Sets the value of the 'firstname' field. + * @param value the value to set. + */ + public void setFirstname(java.lang.String value) { + this.firstname = value; + } + + /** + * Gets the value of the 'lastname' field. + * @return The value of the 'lastname' field. + */ + public java.lang.String getLastname() { + return lastname; + } + + + /** + * Sets the value of the 'lastname' field. + * @param value the value to set. + */ + public void setLastname(java.lang.String value) { + this.lastname = value; + } + + /** + * Gets the value of the 'company' field. + * @return The value of the 'company' field. + */ + public java.lang.String getCompany() { + return company; + } + + + /** + * Sets the value of the 'company' field. + * @param value the value to set. + */ + public void setCompany(java.lang.String value) { + this.company = value; + } + + /** + * Gets the value of the 'street' field. + * @return The value of the 'street' field. + */ + public java.lang.String getStreet() { + return street; + } + + + /** + * Sets the value of the 'street' field. + * @param value the value to set. + */ + public void setStreet(java.lang.String value) { + this.street = value; + } + + /** + * Gets the value of the 'city' field. + * @return The value of the 'city' field. + */ + public java.lang.String getCity() { + return city; + } + + + /** + * Sets the value of the 'city' field. + * @param value the value to set. + */ + public void setCity(java.lang.String value) { + this.city = value; + } + + /** + * Gets the value of the 'county' field. + * @return The value of the 'county' field. + */ + public java.lang.String getCounty() { + return county; + } + + + /** + * Sets the value of the 'county' field. + * @param value the value to set. + */ + public void setCounty(java.lang.String value) { + this.county = value; + } + + /** + * Gets the value of the 'state' field. + * @return The value of the 'state' field. + */ + public java.lang.String getState() { + return state; + } + + + /** + * Sets the value of the 'state' field. + * @param value the value to set. + */ + public void setState(java.lang.String value) { + this.state = value; + } + + /** + * Gets the value of the 'zip' field. + * @return The value of the 'zip' field. + */ + public java.lang.String getZip() { + return zip; + } + + + /** + * Sets the value of the 'zip' field. + * @param value the value to set. + */ + public void setZip(java.lang.String value) { + this.zip = value; + } + + /** + * Gets the value of the 'homePhone' field. + * @return The value of the 'homePhone' field. + */ + public java.lang.String getHomePhone() { + return homePhone; + } + + + /** + * Sets the value of the 'homePhone' field. + * @param value the value to set. + */ + public void setHomePhone(java.lang.String value) { + this.homePhone = value; + } + + /** + * Gets the value of the 'cellPhone' field. 
+ * @return The value of the 'cellPhone' field. + */ + public java.lang.String getCellPhone() { + return cellPhone; + } + + + /** + * Sets the value of the 'cellPhone' field. + * @param value the value to set. + */ + public void setCellPhone(java.lang.String value) { + this.cellPhone = value; + } + + /** + * Gets the value of the 'email' field. + * @return The value of the 'email' field. + */ + public java.lang.String getEmail() { + return email; + } + + + /** + * Sets the value of the 'email' field. + * @param value the value to set. + */ + public void setEmail(java.lang.String value) { + this.email = value; + } + + /** + * Gets the value of the 'website' field. + * @return The value of the 'website' field. + */ + public java.lang.String getWebsite() { + return website; + } + + + /** + * Sets the value of the 'website' field. + * @param value the value to set. + */ + public void setWebsite(java.lang.String value) { + this.website = value; + } + + /** + * Creates a new Contact RecordBuilder. + * @return A new Contact RecordBuilder + */ + public static com.amazonaws.services.lambda.samples.events.msk.Contact.Builder newBuilder() { + return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(); + } + + /** + * Creates a new Contact RecordBuilder by copying an existing Builder. + * @param other The existing builder to copy. + * @return A new Contact RecordBuilder + */ + public static com.amazonaws.services.lambda.samples.events.msk.Contact.Builder newBuilder(com.amazonaws.services.lambda.samples.events.msk.Contact.Builder other) { + if (other == null) { + return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(); + } else { + return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(other); + } + } + + /** + * Creates a new Contact RecordBuilder by copying an existing Contact instance. + * @param other The existing instance to copy. + * @return A new Contact RecordBuilder + */ + public static com.amazonaws.services.lambda.samples.events.msk.Contact.Builder newBuilder(com.amazonaws.services.lambda.samples.events.msk.Contact other) { + if (other == null) { + return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(); + } else { + return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(other); + } + } + + /** + * RecordBuilder for Contact instances. + */ + @org.apache.avro.specific.AvroGenerated + public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase + implements org.apache.avro.data.RecordBuilder { + + private java.lang.String firstname; + private java.lang.String lastname; + private java.lang.String company; + private java.lang.String street; + private java.lang.String city; + private java.lang.String county; + private java.lang.String state; + private java.lang.String zip; + private java.lang.String homePhone; + private java.lang.String cellPhone; + private java.lang.String email; + private java.lang.String website; + + /** Creates a new Builder */ + private Builder() { + super(SCHEMA$, MODEL$); + } + + /** + * Creates a Builder by copying an existing Builder. + * @param other The existing Builder to copy. 
+ */ + private Builder(com.amazonaws.services.lambda.samples.events.msk.Contact.Builder other) { + super(other); + if (isValidValue(fields()[0], other.firstname)) { + this.firstname = data().deepCopy(fields()[0].schema(), other.firstname); + fieldSetFlags()[0] = other.fieldSetFlags()[0]; + } + if (isValidValue(fields()[1], other.lastname)) { + this.lastname = data().deepCopy(fields()[1].schema(), other.lastname); + fieldSetFlags()[1] = other.fieldSetFlags()[1]; + } + if (isValidValue(fields()[2], other.company)) { + this.company = data().deepCopy(fields()[2].schema(), other.company); + fieldSetFlags()[2] = other.fieldSetFlags()[2]; + } + if (isValidValue(fields()[3], other.street)) { + this.street = data().deepCopy(fields()[3].schema(), other.street); + fieldSetFlags()[3] = other.fieldSetFlags()[3]; + } + if (isValidValue(fields()[4], other.city)) { + this.city = data().deepCopy(fields()[4].schema(), other.city); + fieldSetFlags()[4] = other.fieldSetFlags()[4]; + } + if (isValidValue(fields()[5], other.county)) { + this.county = data().deepCopy(fields()[5].schema(), other.county); + fieldSetFlags()[5] = other.fieldSetFlags()[5]; + } + if (isValidValue(fields()[6], other.state)) { + this.state = data().deepCopy(fields()[6].schema(), other.state); + fieldSetFlags()[6] = other.fieldSetFlags()[6]; + } + if (isValidValue(fields()[7], other.zip)) { + this.zip = data().deepCopy(fields()[7].schema(), other.zip); + fieldSetFlags()[7] = other.fieldSetFlags()[7]; + } + if (isValidValue(fields()[8], other.homePhone)) { + this.homePhone = data().deepCopy(fields()[8].schema(), other.homePhone); + fieldSetFlags()[8] = other.fieldSetFlags()[8]; + } + if (isValidValue(fields()[9], other.cellPhone)) { + this.cellPhone = data().deepCopy(fields()[9].schema(), other.cellPhone); + fieldSetFlags()[9] = other.fieldSetFlags()[9]; + } + if (isValidValue(fields()[10], other.email)) { + this.email = data().deepCopy(fields()[10].schema(), other.email); + fieldSetFlags()[10] = other.fieldSetFlags()[10]; + } + if (isValidValue(fields()[11], other.website)) { + this.website = data().deepCopy(fields()[11].schema(), other.website); + fieldSetFlags()[11] = other.fieldSetFlags()[11]; + } + } + + /** + * Creates a Builder by copying an existing Contact instance + * @param other The existing instance to copy. 
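+   *              (each field of {@code other} holding a valid value is deep-copied and marked as set)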
+ */ + private Builder(com.amazonaws.services.lambda.samples.events.msk.Contact other) { + super(SCHEMA$, MODEL$); + if (isValidValue(fields()[0], other.firstname)) { + this.firstname = data().deepCopy(fields()[0].schema(), other.firstname); + fieldSetFlags()[0] = true; + } + if (isValidValue(fields()[1], other.lastname)) { + this.lastname = data().deepCopy(fields()[1].schema(), other.lastname); + fieldSetFlags()[1] = true; + } + if (isValidValue(fields()[2], other.company)) { + this.company = data().deepCopy(fields()[2].schema(), other.company); + fieldSetFlags()[2] = true; + } + if (isValidValue(fields()[3], other.street)) { + this.street = data().deepCopy(fields()[3].schema(), other.street); + fieldSetFlags()[3] = true; + } + if (isValidValue(fields()[4], other.city)) { + this.city = data().deepCopy(fields()[4].schema(), other.city); + fieldSetFlags()[4] = true; + } + if (isValidValue(fields()[5], other.county)) { + this.county = data().deepCopy(fields()[5].schema(), other.county); + fieldSetFlags()[5] = true; + } + if (isValidValue(fields()[6], other.state)) { + this.state = data().deepCopy(fields()[6].schema(), other.state); + fieldSetFlags()[6] = true; + } + if (isValidValue(fields()[7], other.zip)) { + this.zip = data().deepCopy(fields()[7].schema(), other.zip); + fieldSetFlags()[7] = true; + } + if (isValidValue(fields()[8], other.homePhone)) { + this.homePhone = data().deepCopy(fields()[8].schema(), other.homePhone); + fieldSetFlags()[8] = true; + } + if (isValidValue(fields()[9], other.cellPhone)) { + this.cellPhone = data().deepCopy(fields()[9].schema(), other.cellPhone); + fieldSetFlags()[9] = true; + } + if (isValidValue(fields()[10], other.email)) { + this.email = data().deepCopy(fields()[10].schema(), other.email); + fieldSetFlags()[10] = true; + } + if (isValidValue(fields()[11], other.website)) { + this.website = data().deepCopy(fields()[11].schema(), other.website); + fieldSetFlags()[11] = true; + } + } + + /** + * Gets the value of the 'firstname' field. + * @return The value. + */ + public java.lang.String getFirstname() { + return firstname; + } + + + /** + * Sets the value of the 'firstname' field. + * @param value The value of 'firstname'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setFirstname(java.lang.String value) { + validate(fields()[0], value); + this.firstname = value; + fieldSetFlags()[0] = true; + return this; + } + + /** + * Checks whether the 'firstname' field has been set. + * @return True if the 'firstname' field has been set, false otherwise. + */ + public boolean hasFirstname() { + return fieldSetFlags()[0]; + } + + + /** + * Clears the value of the 'firstname' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearFirstname() { + firstname = null; + fieldSetFlags()[0] = false; + return this; + } + + /** + * Gets the value of the 'lastname' field. + * @return The value. + */ + public java.lang.String getLastname() { + return lastname; + } + + + /** + * Sets the value of the 'lastname' field. + * @param value The value of 'lastname'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setLastname(java.lang.String value) { + validate(fields()[1], value); + this.lastname = value; + fieldSetFlags()[1] = true; + return this; + } + + /** + * Checks whether the 'lastname' field has been set. + * @return True if the 'lastname' field has been set, false otherwise. 
+ */ + public boolean hasLastname() { + return fieldSetFlags()[1]; + } + + + /** + * Clears the value of the 'lastname' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearLastname() { + lastname = null; + fieldSetFlags()[1] = false; + return this; + } + + /** + * Gets the value of the 'company' field. + * @return The value. + */ + public java.lang.String getCompany() { + return company; + } + + + /** + * Sets the value of the 'company' field. + * @param value The value of 'company'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCompany(java.lang.String value) { + validate(fields()[2], value); + this.company = value; + fieldSetFlags()[2] = true; + return this; + } + + /** + * Checks whether the 'company' field has been set. + * @return True if the 'company' field has been set, false otherwise. + */ + public boolean hasCompany() { + return fieldSetFlags()[2]; + } + + + /** + * Clears the value of the 'company' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCompany() { + company = null; + fieldSetFlags()[2] = false; + return this; + } + + /** + * Gets the value of the 'street' field. + * @return The value. + */ + public java.lang.String getStreet() { + return street; + } + + + /** + * Sets the value of the 'street' field. + * @param value The value of 'street'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setStreet(java.lang.String value) { + validate(fields()[3], value); + this.street = value; + fieldSetFlags()[3] = true; + return this; + } + + /** + * Checks whether the 'street' field has been set. + * @return True if the 'street' field has been set, false otherwise. + */ + public boolean hasStreet() { + return fieldSetFlags()[3]; + } + + + /** + * Clears the value of the 'street' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearStreet() { + street = null; + fieldSetFlags()[3] = false; + return this; + } + + /** + * Gets the value of the 'city' field. + * @return The value. + */ + public java.lang.String getCity() { + return city; + } + + + /** + * Sets the value of the 'city' field. + * @param value The value of 'city'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCity(java.lang.String value) { + validate(fields()[4], value); + this.city = value; + fieldSetFlags()[4] = true; + return this; + } + + /** + * Checks whether the 'city' field has been set. + * @return True if the 'city' field has been set, false otherwise. + */ + public boolean hasCity() { + return fieldSetFlags()[4]; + } + + + /** + * Clears the value of the 'city' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCity() { + city = null; + fieldSetFlags()[4] = false; + return this; + } + + /** + * Gets the value of the 'county' field. + * @return The value. + */ + public java.lang.String getCounty() { + return county; + } + + + /** + * Sets the value of the 'county' field. + * @param value The value of 'county'. + * @return This builder. 
+ */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCounty(java.lang.String value) { + validate(fields()[5], value); + this.county = value; + fieldSetFlags()[5] = true; + return this; + } + + /** + * Checks whether the 'county' field has been set. + * @return True if the 'county' field has been set, false otherwise. + */ + public boolean hasCounty() { + return fieldSetFlags()[5]; + } + + + /** + * Clears the value of the 'county' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCounty() { + county = null; + fieldSetFlags()[5] = false; + return this; + } + + /** + * Gets the value of the 'state' field. + * @return The value. + */ + public java.lang.String getState() { + return state; + } + + + /** + * Sets the value of the 'state' field. + * @param value The value of 'state'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setState(java.lang.String value) { + validate(fields()[6], value); + this.state = value; + fieldSetFlags()[6] = true; + return this; + } + + /** + * Checks whether the 'state' field has been set. + * @return True if the 'state' field has been set, false otherwise. + */ + public boolean hasState() { + return fieldSetFlags()[6]; + } + + + /** + * Clears the value of the 'state' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearState() { + state = null; + fieldSetFlags()[6] = false; + return this; + } + + /** + * Gets the value of the 'zip' field. + * @return The value. + */ + public java.lang.String getZip() { + return zip; + } + + + /** + * Sets the value of the 'zip' field. + * @param value The value of 'zip'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setZip(java.lang.String value) { + validate(fields()[7], value); + this.zip = value; + fieldSetFlags()[7] = true; + return this; + } + + /** + * Checks whether the 'zip' field has been set. + * @return True if the 'zip' field has been set, false otherwise. + */ + public boolean hasZip() { + return fieldSetFlags()[7]; + } + + + /** + * Clears the value of the 'zip' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearZip() { + zip = null; + fieldSetFlags()[7] = false; + return this; + } + + /** + * Gets the value of the 'homePhone' field. + * @return The value. + */ + public java.lang.String getHomePhone() { + return homePhone; + } + + + /** + * Sets the value of the 'homePhone' field. + * @param value The value of 'homePhone'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setHomePhone(java.lang.String value) { + validate(fields()[8], value); + this.homePhone = value; + fieldSetFlags()[8] = true; + return this; + } + + /** + * Checks whether the 'homePhone' field has been set. + * @return True if the 'homePhone' field has been set, false otherwise. + */ + public boolean hasHomePhone() { + return fieldSetFlags()[8]; + } + + + /** + * Clears the value of the 'homePhone' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearHomePhone() { + homePhone = null; + fieldSetFlags()[8] = false; + return this; + } + + /** + * Gets the value of the 'cellPhone' field. + * @return The value. 
+ */ + public java.lang.String getCellPhone() { + return cellPhone; + } + + + /** + * Sets the value of the 'cellPhone' field. + * @param value The value of 'cellPhone'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCellPhone(java.lang.String value) { + validate(fields()[9], value); + this.cellPhone = value; + fieldSetFlags()[9] = true; + return this; + } + + /** + * Checks whether the 'cellPhone' field has been set. + * @return True if the 'cellPhone' field has been set, false otherwise. + */ + public boolean hasCellPhone() { + return fieldSetFlags()[9]; + } + + + /** + * Clears the value of the 'cellPhone' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCellPhone() { + cellPhone = null; + fieldSetFlags()[9] = false; + return this; + } + + /** + * Gets the value of the 'email' field. + * @return The value. + */ + public java.lang.String getEmail() { + return email; + } + + + /** + * Sets the value of the 'email' field. + * @param value The value of 'email'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setEmail(java.lang.String value) { + validate(fields()[10], value); + this.email = value; + fieldSetFlags()[10] = true; + return this; + } + + /** + * Checks whether the 'email' field has been set. + * @return True if the 'email' field has been set, false otherwise. + */ + public boolean hasEmail() { + return fieldSetFlags()[10]; + } + + + /** + * Clears the value of the 'email' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearEmail() { + email = null; + fieldSetFlags()[10] = false; + return this; + } + + /** + * Gets the value of the 'website' field. + * @return The value. + */ + public java.lang.String getWebsite() { + return website; + } + + + /** + * Sets the value of the 'website' field. + * @param value The value of 'website'. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setWebsite(java.lang.String value) { + validate(fields()[11], value); + this.website = value; + fieldSetFlags()[11] = true; + return this; + } + + /** + * Checks whether the 'website' field has been set. + * @return True if the 'website' field has been set, false otherwise. + */ + public boolean hasWebsite() { + return fieldSetFlags()[11]; + } + + + /** + * Clears the value of the 'website' field. + * @return This builder. + */ + public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearWebsite() { + website = null; + fieldSetFlags()[11] = false; + return this; + } + + @Override + @SuppressWarnings("unchecked") + public Contact build() { + try { + Contact record = new Contact(); + record.firstname = fieldSetFlags()[0] ? this.firstname : (java.lang.String) defaultValue(fields()[0]); + record.lastname = fieldSetFlags()[1] ? this.lastname : (java.lang.String) defaultValue(fields()[1]); + record.company = fieldSetFlags()[2] ? this.company : (java.lang.String) defaultValue(fields()[2]); + record.street = fieldSetFlags()[3] ? this.street : (java.lang.String) defaultValue(fields()[3]); + record.city = fieldSetFlags()[4] ? this.city : (java.lang.String) defaultValue(fields()[4]); + record.county = fieldSetFlags()[5] ? this.county : (java.lang.String) defaultValue(fields()[5]); + record.state = fieldSetFlags()[6] ? 
this.state : (java.lang.String) defaultValue(fields()[6]); + record.zip = fieldSetFlags()[7] ? this.zip : (java.lang.String) defaultValue(fields()[7]); + record.homePhone = fieldSetFlags()[8] ? this.homePhone : (java.lang.String) defaultValue(fields()[8]); + record.cellPhone = fieldSetFlags()[9] ? this.cellPhone : (java.lang.String) defaultValue(fields()[9]); + record.email = fieldSetFlags()[10] ? this.email : (java.lang.String) defaultValue(fields()[10]); + record.website = fieldSetFlags()[11] ? this.website : (java.lang.String) defaultValue(fields()[11]); + return record; + } catch (org.apache.avro.AvroMissingFieldException e) { + throw e; + } catch (java.lang.Exception e) { + throw new org.apache.avro.AvroRuntimeException(e); + } + } + } + + @SuppressWarnings("unchecked") + private static final org.apache.avro.io.DatumWriter + WRITER$ = (org.apache.avro.io.DatumWriter)MODEL$.createDatumWriter(SCHEMA$); + + @Override public void writeExternal(java.io.ObjectOutput out) + throws java.io.IOException { + WRITER$.write(this, SpecificData.getEncoder(out)); + } + + @SuppressWarnings("unchecked") + private static final org.apache.avro.io.DatumReader + READER$ = (org.apache.avro.io.DatumReader)MODEL$.createDatumReader(SCHEMA$); + + @Override public void readExternal(java.io.ObjectInput in) + throws java.io.IOException { + READER$.read(this, SpecificData.getDecoder(in)); + } + + @Override protected boolean hasCustomCoders() { return true; } + + @Override public void customEncode(org.apache.avro.io.Encoder out) + throws java.io.IOException + { + if (this.firstname == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.firstname); + } + + if (this.lastname == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.lastname); + } + + if (this.company == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.company); + } + + if (this.street == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.street); + } + + if (this.city == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.city); + } + + if (this.county == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.county); + } + + if (this.state == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.state); + } + + if (this.zip == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.zip); + } + + if (this.homePhone == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.homePhone); + } + + if (this.cellPhone == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.cellPhone); + } + + if (this.email == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.email); + } + + if (this.website == null) { + out.writeIndex(0); + out.writeNull(); + } else { + out.writeIndex(1); + out.writeString(this.website); + } + + } + + @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in) + throws java.io.IOException + { + org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff(); + if (fieldOrder == null) { + if (in.readIndex() != 1) { + in.readNull(); + this.firstname = null; + } else { + 
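+        // Avro encodes a union as a branch index followed by the branch value;
+        // in this ["null","string"] union, index 0 is the null branch and index 1
+        // the string branch, so a non-null value is read back as a string here.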
this.firstname = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.lastname = null; + } else { + this.lastname = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.company = null; + } else { + this.company = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.street = null; + } else { + this.street = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.city = null; + } else { + this.city = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.county = null; + } else { + this.county = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.state = null; + } else { + this.state = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.zip = null; + } else { + this.zip = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.homePhone = null; + } else { + this.homePhone = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.cellPhone = null; + } else { + this.cellPhone = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.email = null; + } else { + this.email = in.readString(); + } + + if (in.readIndex() != 1) { + in.readNull(); + this.website = null; + } else { + this.website = in.readString(); + } + + } else { + for (int i = 0; i < 12; i++) { + switch (fieldOrder[i].pos()) { + case 0: + if (in.readIndex() != 1) { + in.readNull(); + this.firstname = null; + } else { + this.firstname = in.readString(); + } + break; + + case 1: + if (in.readIndex() != 1) { + in.readNull(); + this.lastname = null; + } else { + this.lastname = in.readString(); + } + break; + + case 2: + if (in.readIndex() != 1) { + in.readNull(); + this.company = null; + } else { + this.company = in.readString(); + } + break; + + case 3: + if (in.readIndex() != 1) { + in.readNull(); + this.street = null; + } else { + this.street = in.readString(); + } + break; + + case 4: + if (in.readIndex() != 1) { + in.readNull(); + this.city = null; + } else { + this.city = in.readString(); + } + break; + + case 5: + if (in.readIndex() != 1) { + in.readNull(); + this.county = null; + } else { + this.county = in.readString(); + } + break; + + case 6: + if (in.readIndex() != 1) { + in.readNull(); + this.state = null; + } else { + this.state = in.readString(); + } + break; + + case 7: + if (in.readIndex() != 1) { + in.readNull(); + this.zip = null; + } else { + this.zip = in.readString(); + } + break; + + case 8: + if (in.readIndex() != 1) { + in.readNull(); + this.homePhone = null; + } else { + this.homePhone = in.readString(); + } + break; + + case 9: + if (in.readIndex() != 1) { + in.readNull(); + this.cellPhone = null; + } else { + this.cellPhone = in.readString(); + } + break; + + case 10: + if (in.readIndex() != 1) { + in.readNull(); + this.email = null; + } else { + this.email = in.readString(); + } + break; + + case 11: + if (in.readIndex() != 1) { + in.readNull(); + this.website = null; + } else { + this.website = in.readString(); + } + break; + + default: + throw new java.io.IOException("Corrupt ResolvingDecoder."); + } + } + } + } +} + + + + + + + + + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/HandlerMSK.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/HandlerMSK.java new file mode 100644 index 000000000..c5ddbdc7e --- /dev/null +++ 
b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/HandlerMSK.java
@@ -0,0 +1,275 @@
+//Lambda Runtime delivers a batch of messages to the lambda function.
+//Each batch of messages has two fields, EventSource and EventSourceARN.
+//Each batch of messages also has a field called Records.
+//The Records field is a map with multiple keys and values.
+//Each key is a combination of the topic name and the partition number.
+//One batch of messages can contain messages from multiple partitions.
+
+/*
+To simplify representing a batch of Kafka messages as a list of messages,
+we have created a Java class called KafkaMessage under the models package.
+Here we are mapping the structure of an incoming Kafka event to a list of
+objects of the KafkaMessage class.
+*/
+
+package com.amazonaws.services.lambda.samples.events.msk;
+
+import com.amazonaws.services.lambda.runtime.Context;
+import com.amazonaws.services.lambda.runtime.LambdaLogger;
+import com.amazonaws.services.lambda.runtime.RequestHandler;
+import com.amazonaws.services.lambda.runtime.events.KafkaEvent;
+import com.amazonaws.services.lambda.runtime.events.KafkaEvent.KafkaEventRecord;
+
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+public class HandlerMSK implements RequestHandler<KafkaEvent, String> {
+    //We initialize an empty list of the KafkaMessage class
+    List<KafkaMessage> listOfMessages = new ArrayList<>();
+    Gson gson = new GsonBuilder().setPrettyPrinting().create();
+    @Override
+    public String handleRequest(KafkaEvent event, Context context) {
+        LambdaLogger logger = context.getLogger();
+        logger.log("========== LAMBDA FUNCTION STARTED ==========");
+        logger.log("Event received: " + gson.toJson(event));
+
+        String response = "200 OK";
+        this.listOfMessages = new ArrayList<>();
+
+        // Counters for zip code patterns
+        int zip1000Count = 0;
+        int zip2000Count = 0;
+        //The incoming KafkaEvent object has a property called records that is a map.
+        //Each key in the map is a combination of a topic and a partition.
+        Map<String, List<KafkaEventRecord>> record = event.getRecords();
+
+        if (record == null) {
+            logger.log("WARNING: Event records map is null");
+            return response;
+        }
+
+        logger.log("Records map size: " + record.size());
+
+        Set<String> keySet = record.keySet();
+        logger.log("Key set size: " + keySet.size());
+        logger.log("Keys: " + keySet);
+
+        Iterator<String> iterator = keySet.iterator();
+        //We iterate through each of the keys in the map
+        while (iterator.hasNext()) {
+            String thisKey = iterator.next();
+            logger.log("Processing key: " + thisKey);
+
+            //Using the key, we retrieve the value of the map, which is a list of KafkaEventRecord.
+            //One object of the KafkaEventRecord class represents an individual Kafka message.
+            List<KafkaEventRecord> thisListOfRecords = record.get(thisKey);
+
+            if (thisListOfRecords == null) {
+                logger.log("WARNING: Record list for key " + thisKey + " is null");
+                continue;
+            }
+
+            logger.log("Record list size for key " + thisKey + ": " + thisListOfRecords.size());
+
+            //We now iterate through the list of KafkaEventRecords
+            for (KafkaEventRecord thisRecord : thisListOfRecords) {
+                logger.log("Processing record...");
+
+                /*
+                We initialize a new object of the KafkaMessage class, which is a simplified
+                representation in our models package. We then get the fields from each Kafka
+                message in the object of the KafkaEventRecord class and set them to the fields
+                of the KafkaMessage class.
+                */
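+                // For reference, each KafkaEventRecord processed here corresponds to one entry of
+                // the "records" map in an MSK event, as in events/event.json in this pattern, e.g.
+                // {"topic":"myTopic","partition":0,"offset":250,"timestamp":1678072110111,
+                //  "timestampType":"CREATE_TIME","value":"Zg==","headers":[]}
+                // where "value" (and "key", when present) are base64-encoded.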
+                KafkaMessage thisMessage = new KafkaMessage();
+                thisMessage.setTopic(thisRecord.getTopic());
+                thisMessage.setPartition(thisRecord.getPartition());
+                thisMessage.setOffset(thisRecord.getOffset());
+                thisMessage.setTimestamp(thisRecord.getTimestamp());
+                thisMessage.setTimestampType(thisRecord.getTimestampType());
+
+                logger.log("Record metadata - Topic: " + thisRecord.getTopic() +
+                        ", Partition: " + thisRecord.getPartition() +
+                        ", Offset: " + thisRecord.getOffset());
+
+                String key = thisRecord.getKey();
+                String value = thisRecord.getValue();
+
+                logger.log("Key (base64): " + key);
+                logger.log("Value (base64): " + value);
+
+                String decodedKey = "null";
+                String decodedValue = "null";
+                //The key and value inside a Kafka message are base64 encoded and need to be decoded
+                if (null != key) {
+                    logger.log("Decoding key...");
+                    try {
+                        byte[] decodedKeyBytes = Base64.getDecoder().decode(key);
+                        decodedKey = new String(decodedKeyBytes);
+                        logger.log("Decoded key: " + decodedKey);
+                    } catch (Exception e) {
+                        logger.log("ERROR decoding key: " + e.getMessage());
+                    }
+                } else {
+                    logger.log("Key is null");
+                }
+
+                if (null != value) {
+                    logger.log("Decoding value...");
+                    try {
+                        byte[] decodedValueBytes = Base64.getDecoder().decode(value);
+                        logger.log("Value decoded, length: " + decodedValueBytes.length + " bytes");
+
+                        // Print the complete message in hex format
+                        logger.log("Complete message in hex format:");
+                        logger.log(bytesToHexString(decodedValueBytes, 0));
+
+                        try {
+                            decodedValue = new String(decodedValueBytes);
+                            logger.log("Decoded value as string: " + (decodedValue.length() > 100 ? decodedValue.substring(0, 100) + "..." : decodedValue));
+
+                            // Add more detailed logging for AVRO messages
+                            logger.log("=== AVRO MESSAGE DETAILS ===");
+                            logger.log("Message appears to be AVRO-formatted. Attempting to extract fields:");
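+                            // NOTE: the printable-character scan below is only a logging aid; it does
+                            // not actually decode AVRO. For schema-aware deserialization of these
+                            // records, see AvroKafkaHandler in this package, which uses Powertools'
+                            // @Deserialization(type = DeserializationType.KAFKA_AVRO) to map each
+                            // record value to the generated Contact class.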
+                            // Try to extract some common fields from the AVRO binary data.
+                            // This is a simple approach to show some readable content.
+                            StringBuilder readableContent = new StringBuilder();
+                            for (int i = 0; i < decodedValueBytes.length; i++) {
+                                // Skip non-printable characters
+                                if (decodedValueBytes[i] >= 32 && decodedValueBytes[i] < 127) {
+                                    readableContent.append((char) decodedValueBytes[i]);
+                                }
+                            }
+
+                            String readableString = readableContent.toString();
+                            logger.log("Readable content extracted from AVRO: " + readableString);
+
+                            // Check for zip code patterns
+                            if (readableString.contains("1000")) {
+                                logger.log("FOUND ZIP CODE STARTING WITH 1000");
+                            }
+                            if (readableString.contains("2000")) {
+                                logger.log("FOUND ZIP CODE STARTING WITH 2000");
+                            }
+
+                            logger.log("=== END AVRO MESSAGE DETAILS ===");
+                        } catch (Exception e) {
+                            logger.log("ERROR converting bytes to string: " + e.getMessage());
+                            decodedValue = "Error decoding: " + e.getMessage();
+                        }
+                    } catch (Exception e) {
+                        logger.log("ERROR decoding value: " + e.getMessage());
+                        e.printStackTrace();
+                    }
+                } else {
+                    logger.log("Value is null");
+                }
+
+                thisMessage.setKey(key);
+                thisMessage.setValue(value);
+                thisMessage.setDecodedKey(decodedKey);
+                thisMessage.setDecodedValue(decodedValue);
+
+                //A Kafka message can optionally have a list of headers.
+                //The code below gets the headers, iterates through each header, and gets its key and value.
+                List<KafkaHeader> headersInThisMessage = new ArrayList<>();
+                List<Map<String, byte[]>> headers = thisRecord.getHeaders();
+
+                if (headers != null) {
+                    logger.log("Headers count: " + headers.size());
+
+                    for (Map<String, byte[]> thisHeader : headers) {
+                        Set<String> thisHeaderKeys = thisHeader.keySet();
+                        Iterator<String> thisHeaderKeysIterator = thisHeaderKeys.iterator();
+                        while (thisHeaderKeysIterator.hasNext()) {
+                            String thisHeaderKey = thisHeaderKeysIterator.next();
+                            byte[] thisHeaderValue = thisHeader.get(thisHeaderKey);
+                            String thisHeaderValueString = new String(thisHeaderValue);
+                            KafkaHeader thisMessageHeader = new KafkaHeader();
+                            thisMessageHeader.setKey(thisHeaderKey);
+                            thisMessageHeader.setValue(thisHeaderValueString);
+                            headersInThisMessage.add(thisMessageHeader);
+                            logger.log("Header - Key: " + thisHeaderKey + ", Value: " + thisHeaderValueString);
+                        }
+                    }
+                } else {
+                    logger.log("No headers in message");
+                }
+
+                thisMessage.setHeaders(headersInThisMessage);
+                listOfMessages.add(thisMessage);
+
+                // Below we log the particular Kafka message in string format using the toString method,
+                // as well as in JSON format using the gson.toJson function
+                logger.log("Received this message from Kafka - " + thisMessage.toString());
+                logger.log("Message in JSON format : " + gson.toJson(thisMessage));
+
+                // Add a more readable summary of the message
+                logger.log("=== MESSAGE SUMMARY ===");
+                logger.log("Topic: " + thisMessage.getTopic());
+                logger.log("Partition: " + thisMessage.getPartition());
+                logger.log("Offset: " + thisMessage.getOffset());
+                logger.log("Key: " + thisMessage.getDecodedKey());
+
+                // Check for zip code patterns in the decoded value
+                String decodedValueStr = thisMessage.getDecodedValue();
+                if (decodedValueStr != null) {
+                    if (decodedValueStr.contains("1000")) {
+                        logger.log("ZIP CODE: Found 1000 pattern in message");
+                        zip1000Count++;
+                    }
+                    if (decodedValueStr.contains("2000")) {
+                        logger.log("ZIP CODE: Found 2000 pattern in message");
+                        zip2000Count++;
+                    }
+                }
+
+                logger.log("=== END MESSAGE SUMMARY ===");
+            }
+        }
+        logger.log("All Messages in this batch = " +
gson.toJson(listOfMessages)); + + // Log summary of zip code distribution + logger.log("========== ZIP CODE DISTRIBUTION SUMMARY =========="); + logger.log("Messages with zip code containing 1000: " + zip1000Count); + logger.log("Messages with zip code containing 2000: " + zip2000Count); + logger.log("Other messages: " + (listOfMessages.size() - zip1000Count - zip2000Count)); + logger.log("===================================================="); + + logger.log("========== LAMBDA FUNCTION COMPLETED =========="); + return response; + } + + /** + * Convert byte array to hexadecimal string representation + * + * @param bytes Byte array to convert + * @param maxLength Maximum number of bytes to convert (0 for all) + * @return Hexadecimal string representation + */ + private String bytesToHexString(byte[] bytes, int maxLength) { + StringBuilder sb = new StringBuilder(); + int length = maxLength > 0 && maxLength < bytes.length ? maxLength : bytes.length; + + for (int i = 0; i < length; i++) { + sb.append(String.format("%02X", bytes[i])); + if (i % 16 == 15) { + sb.append("\n"); + } else if (i % 4 == 3) { + sb.append(" "); + } + } + + if (maxLength > 0 && length < bytes.length) { + sb.append("... (").append(bytes.length - length).append(" more bytes)"); + } + + return sb.toString(); + } +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaHeader.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaHeader.java new file mode 100644 index 000000000..d093d05ee --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaHeader.java @@ -0,0 +1,67 @@ +package com.amazonaws.services.lambda.samples.events.msk; + +import java.util.Objects; + +public class KafkaHeader { + String key; + String value; + /** + * + */ + public KafkaHeader() { + super(); + } + /** + * @param key + * @param value + */ + public KafkaHeader(String key, String value) { + super(); + this.key = key; + this.value = value; + } + /** + * @return the key + */ + public String getKey() { + return key; + } + /** + * @param key the key to set + */ + public void setKey(String key) { + this.key = key; + } + /** + * @return the value + */ + public String getValue() { + return value; + } + /** + * @param value the value to set + */ + public void setValue(String value) { + this.value = value; + } + @Override + public int hashCode() { + return Objects.hash(key, value); + } + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + KafkaHeader other = (KafkaHeader) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + @Override + public String toString() { + return "KafkaHeader [key=" + key + ", value=" + value + "]"; + } + +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaMessage.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaMessage.java new file mode 100644 index 000000000..98422a29f --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaMessage.java @@ -0,0 
+1,198 @@
+package com.amazonaws.services.lambda.samples.events.msk;
+
+import java.util.List;
+import java.util.Objects;
+
+public class KafkaMessage {
+	String topic;
+	int partition;
+	long offset;
+	long timestamp;
+	String timestampType;
+	String key;
+	String value;
+	String decodedKey;
+	String decodedValue;
+	List<KafkaHeader> headers;
+	/**
+	 *
+	 */
+	public KafkaMessage() {
+		super();
+	}
+	/**
+	 * @param topic
+	 * @param partition
+	 * @param offset
+	 * @param timestamp
+	 * @param timestampType
+	 * @param key
+	 * @param value
+	 * @param decodedKey
+	 * @param decodedValue
+	 * @param headers
+	 */
+	public KafkaMessage(String topic, int partition, long offset, long timestamp, String timestampType, String key, String value,
+			String decodedKey, String decodedValue, List<KafkaHeader> headers) {
+		super();
+		this.topic = topic;
+		this.partition = partition;
+		this.offset = offset;
+		this.timestamp = timestamp;
+		this.timestampType = timestampType;
+		this.key = key;
+		this.value = value;
+		this.decodedKey = decodedKey;
+		this.decodedValue = decodedValue;
+		this.headers = headers;
+	}
+	/**
+	 * @return the topic
+	 */
+	public String getTopic() {
+		return topic;
+	}
+	/**
+	 * @param topic the topic to set
+	 */
+	public void setTopic(String topic) {
+		this.topic = topic;
+	}
+	/**
+	 * @return the partition
+	 */
+	public int getPartition() {
+		return partition;
+	}
+	/**
+	 * @param partition the partition to set
+	 */
+	public void setPartition(int partition) {
+		this.partition = partition;
+	}
+	/**
+	 * @return the offset
+	 */
+	public long getOffset() {
+		return offset;
+	}
+	/**
+	 * @param offset the offset to set
+	 */
+	public void setOffset(long offset) {
+		this.offset = offset;
+	}
+	/**
+	 * @return the timestamp
+	 */
+	public long getTimestamp() {
+		return timestamp;
+	}
+	/**
+	 * @param timestamp the timestamp to set
+	 */
+	public void setTimestamp(long timestamp) {
+		this.timestamp = timestamp;
+	}
+	/**
+	 * @return the timestampType
+	 */
+	public String getTimestampType() {
+		return timestampType;
+	}
+	/**
+	 * @param timestampType the timestampType to set
+	 */
+	public void setTimestampType(String timestampType) {
+		this.timestampType = timestampType;
+	}
+	/**
+	 * @return the key
+	 */
+	public String getKey() {
+		return key;
+	}
+	/**
+	 * @param key the key to set
+	 */
+	public void setKey(String key) {
+		this.key = key;
+	}
+	/**
+	 * @return the value
+	 */
+	public String getValue() {
+		return value;
+	}
+	/**
+	 * @param value the value to set
+	 */
+	public void setValue(String value) {
+		this.value = value;
+	}
+	/**
+	 * @return the decodedKey
+	 */
+	public String getDecodedKey() {
+		return decodedKey;
+	}
+	/**
+	 * @param decodedKey the decodedKey to set
+	 */
+	public void setDecodedKey(String decodedKey) {
+		this.decodedKey = decodedKey;
+	}
+	/**
+	 * @return the decodedValue
+	 */
+	public String getDecodedValue() {
+		return decodedValue;
+	}
+	/**
+	 * @param decodedValue the decodedValue to set
+	 */
+	public void setDecodedValue(String decodedValue) {
+		this.decodedValue = decodedValue;
+	}
+	/**
+	 * @return the headers
+	 */
+	public List<KafkaHeader> getHeaders() {
+		return headers;
+	}
+	/**
+	 * @param headers the headers to set
+	 */
+	public void setHeaders(List<KafkaHeader> headers) {
+		this.headers = headers;
+	}
+
+	@Override
+	public int hashCode() {
+		return Objects.hash(decodedKey, decodedValue, headers, key, offset, partition, timestamp, timestampType, topic,
+				value);
+	}
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (getClass() != obj.getClass())
+			return false;
+		KafkaMessage other = (KafkaMessage) obj;
+		return Objects.equals(decodedKey, other.decodedKey) && Objects.equals(decodedValue, other.decodedValue)
+				&& Objects.equals(headers, other.headers) && Objects.equals(key, other.key) && offset == other.offset
+				&& partition == other.partition && timestamp == other.timestamp
+				&& Objects.equals(timestampType, other.timestampType) && Objects.equals(topic, other.topic)
+				&& Objects.equals(value, other.value);
+	}
+	@Override
+	public String toString() {
+		return "KafkaMessage [topic=" + topic + ", partition=" + partition + ", offset=" + offset
+				+ ", timestamp=" + timestamp + ", timestampType=" + timestampType + ", key=" + key
+				+ ", value=" + value + ", decodedKey=" + decodedKey + ", decodedValue=" + decodedValue
+				+ ", headers=" + headers + "]";
+	}
+
+
+}
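A quick way to see the consumer-side base64 handling end to end is a tiny driver around this POJO. The sketch below is not part of the diff; it assumes KafkaMessage and KafkaHeader (above) are in the same package, and it reuses the "Zg==" fixture that HandlerMSKTest asserts on further down:

```java
import java.util.ArrayList;
import java.util.Base64;

public class KafkaMessageDemo {
    public static void main(String[] args) {
        KafkaMessage msg = new KafkaMessage();
        msg.setTopic("myTopic");
        msg.setPartition(0);
        msg.setOffset(250L);
        msg.setTimestampType("CREATE_TIME");
        msg.setValue("Zg==");          // base64 payload exactly as delivered by MSK
        msg.setDecodedKey("null");     // records without a key keep the "null" marker
        msg.setDecodedValue(new String(Base64.getDecoder().decode("Zg=="))); // -> "f"
        msg.setHeaders(new ArrayList<KafkaHeader>());
        System.out.println(msg);       // KafkaMessage [topic=myTopic, ... decodedValue=f, headers=[]]
    }
}
```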
diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/SimpleHandler.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/SimpleHandler.java
new file mode 100644
index 000000000..03c4e5092
--- /dev/null
+++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/SimpleHandler.java
@@ -0,0 +1,18 @@
+package com.amazonaws.services.lambda.samples.events.msk;
+
+import com.amazonaws.services.lambda.runtime.Context;
+import com.amazonaws.services.lambda.runtime.RequestHandler;
+
+public class SimpleHandler implements RequestHandler<Object, String> {
+
+    @Override
+    public String handleRequest(Object event, Context context) {
+        System.out.println("=== SIMPLE HANDLER CALLED ===");
+        System.out.println("Event: " + event);
+        System.out.println("Event class: " + (event != null ? 
event.getClass().getName() : "null")); + System.out.println("Context: " + context.getFunctionName()); + System.out.println("=== SIMPLE HANDLER END ==="); + + return "Simple handler executed successfully"; + } +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/resources/log4j2.xml b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/resources/log4j2.xml new file mode 100644 index 000000000..0221c694e --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/main/resources/log4j2.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/HandlerMSKTest.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/HandlerMSKTest.java new file mode 100644 index 000000000..65bd543d3 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/HandlerMSKTest.java @@ -0,0 +1,70 @@ +package com.amazonaws.services.lambda.samples.events.msk; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.events.KafkaEvent; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; + + +class HandlerMSKTest { + private static final String kafkaEventJson = "{\n" + + " \"records\":{\n" + + " \"myTopic-0\":[\n" + + " {\n" + + " \"topic\":\"myTopic\",\n" + + " \"partition\":0,\n" + + " \"offset\":250,\n" + + " \"timestamp\":1678072110111,\n" + + " \"timestampType\":\"CREATE_TIME\",\n" + + " \"value\":\"Zg==\",\n" + + " \"headers\":[\n" + + " \n" + + " ]\n" + + " },\n" + + " {\n" + + " \"topic\":\"myTopic\",\n" + + " \"partition\":0,\n" + + " \"offset\":251,\n" + + " \"timestamp\":1678072111086,\n" + + " \"timestampType\":\"CREATE_TIME\",\n" + + " \"value\":\"Zw==\",\n" + + " \"headers\":[\n" + + " \n" + + " ]\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"eventSource\":\"aws:kafka\",\n" + + " \"eventSourceArn\":\"arn:aws:kafka:us-west-2:123456789012:cluster/MSKWorkshopCluster/a93759a9-c9d0-4952-984c-492c6bfa2be8-13\",\n" + + " \"bootstrapServers\":\"b-2.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098,b-3.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098,b-1.mskworkshopcluster.z9kc4f.c13.kafka.us-west-2.amazonaws.com:9098\"\n" + + "}"; + + @Test + void invokeTest() { + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + KafkaEvent event = gson.fromJson(kafkaEventJson, KafkaEvent.class); + Context context = new TestContext(); + HandlerMSK handler = new HandlerMSK(); + String result = handler.handleRequest(event, context); + assertEquals(result, "200 OK"); + assertEquals(handler.listOfMessages.size(), 2); + assertEquals(handler.listOfMessages.get(0).getTopic(), "myTopic"); + assertEquals(handler.listOfMessages.get(0).getPartition(), 0); + assertEquals(handler.listOfMessages.get(0).getOffset(), 250L); + assertEquals(handler.listOfMessages.get(0).getTimestamp(), 1678072110111L); + assertEquals(handler.listOfMessages.get(0).getTimestampType(), "CREATE_TIME"); + assertEquals(handler.listOfMessages.get(0).getDecodedKey(), "null"); + assertEquals(handler.listOfMessages.get(0).getDecodedValue(), "f"); + 
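+        // "Zg==" and "Zw==" are the base64 encodings of the single bytes 'f' (0x66)
+        // and 'g' (0x67), hence the decoded values "f" and "g" asserted for the two records.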
assertEquals(handler.listOfMessages.get(1).getTopic(), "myTopic"); + assertEquals(handler.listOfMessages.get(1).getPartition(), 0); + assertEquals(handler.listOfMessages.get(1).getOffset(), 251L); + assertEquals(handler.listOfMessages.get(1).getTimestamp(), 1678072111086L); + assertEquals(handler.listOfMessages.get(1).getTimestampType(), "CREATE_TIME"); + assertEquals(handler.listOfMessages.get(1).getDecodedKey(), "null"); + assertEquals(handler.listOfMessages.get(1).getDecodedValue(), "g"); + } +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/TestContext.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/TestContext.java new file mode 100644 index 000000000..479a3b98a --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/TestContext.java @@ -0,0 +1,45 @@ +package com.amazonaws.services.lambda.samples.events.msk; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.CognitoIdentity; +import com.amazonaws.services.lambda.runtime.ClientContext; +import com.amazonaws.services.lambda.runtime.LambdaLogger; + +public class TestContext implements Context{ + + public TestContext() {} + public String getAwsRequestId(){ + return new String("495b12a8-xmpl-4eca-8168-160484189f99"); + } + public String getLogGroupName(){ + return new String("/aws/lambda/my-function"); + } + public String getLogStreamName(){ + return new String("2020/02/26/[$LATEST]704f8dxmpla04097b9134246b8438f1a"); + } + public String getFunctionName(){ + return new String("my-function"); + } + public String getFunctionVersion(){ + return new String("$LATEST"); + } + public String getInvokedFunctionArn(){ + return new String("arn:aws:lambda:us-east-2:123456789012:function:my-function"); + } + public CognitoIdentity getIdentity(){ + return null; + } + public ClientContext getClientContext(){ + return null; + } + public int getRemainingTimeInMillis(){ + return 300000; + } + public int getMemoryLimitInMB(){ + return 512; + } + public LambdaLogger getLogger(){ + return new TestLogger(); + } + +} \ No newline at end of file diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/TestLogger.java b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/TestLogger.java new file mode 100644 index 000000000..0fb16cdc6 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_consumer_function/src/test/java/com/amazonaws/services/lambda/samples/events/msk/TestLogger.java @@ -0,0 +1,14 @@ +package com.amazonaws.services.lambda.samples.events.msk; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import com.amazonaws.services.lambda.runtime.LambdaLogger; + +public class TestLogger implements LambdaLogger { + private static final Logger logger = LoggerFactory.getLogger(TestLogger.class); + public void log(String message){ + logger.info(message); + } + public void log(byte[] message){ + logger.info(new String(message)); + } +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.classpath b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.classpath new file mode 100644 index 000000000..f0e98045f --- /dev/null +++ 
b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.classpath @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.project b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.project new file mode 100644 index 000000000..1359d9ca5 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.project @@ -0,0 +1,23 @@ + + + kafka_event_producer_function + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.m2e.core.maven2Builder + + + + + + org.eclipse.jdt.core.javanature + org.eclipse.m2e.core.maven2Nature + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.settings/org.eclipse.jdt.core.prefs b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 000000000..2af1e7b99 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,8 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.targetPlatform=11 +org.eclipse.jdt.core.compiler.compliance=11 +org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning +org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=ignore +org.eclipse.jdt.core.compiler.release=disabled +org.eclipse.jdt.core.compiler.source=11 diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/pom.xml b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/pom.xml new file mode 100644 index 000000000..897989544 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/pom.xml @@ -0,0 +1,115 @@ + + 4.0.0 + com.amazonaws.services.lambda.samples.events.msk + MSKProducer + 1.0 + jar + A sample Lambda MSK AVRO producer + + 11 + 11 + 1.11.3 + + + + + com.amazonaws + aws-lambda-java-core + 1.2.1 + + + com.amazonaws + aws-lambda-java-events + 3.11.0 + + + com.google.code.gson + gson + 2.10.1 + + + org.apache.kafka + kafka-clients + 3.9.1 + + + + org.apache.avro + avro + ${avro.version} + + + + software.amazon.awssdk + glue + 2.20.160 + + + + software.amazon.awssdk + kafka + 2.20.160 + + + + software.amazon.msk + aws-msk-iam-auth + 2.0.3 + + + + software.amazon.glue + schema-registry-serde + 1.1.15 + + + org.junit.jupiter + junit-jupiter-api + 5.6.0 + test + + + org.junit.jupiter + junit-jupiter-engine + 5.6.0 + test + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.2.4 + + + + package + + shade + + + + + + org.apache.avro + avro-maven-plugin + 1.11.3 + + + generate-sources + + schema + + + ${project.basedir}/src/main/avro/ + ${project.basedir}/src/main/java/ + String + + + + + + + diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/avro/contact.avsc b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/avro/contact.avsc new file mode 100644 index 000000000..13a00de1e --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/avro/contact.avsc @@ -0,0 +1,19 @@ +{ + "type": "record", + "name": "Contact", + "namespace": "com.amazonaws.services.lambda.samples.events.msk", + "fields": [ + {"name": "firstname", "type": ["null", "string"], "default": null}, + {"name": "lastname", "type": ["null", "string"], "default": null}, + {"name": "company", "type": ["null", "string"], "default": null}, + {"name": 
"street", "type": ["null", "string"], "default": null}, + {"name": "city", "type": ["null", "string"], "default": null}, + {"name": "county", "type": ["null", "string"], "default": null}, + {"name": "state", "type": ["null", "string"], "default": null}, + {"name": "zip", "type": ["null", "string"], "default": null}, + {"name": "homePhone", "type": ["null", "string"], "default": null}, + {"name": "cellPhone", "type": ["null", "string"], "default": null}, + {"name": "email", "type": ["null", "string"], "default": null}, + {"name": "website", "type": ["null", "string"], "default": null} + ] +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroProducerHandler.java b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroProducerHandler.java new file mode 100644 index 000000000..9535868d4 --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroProducerHandler.java @@ -0,0 +1,239 @@ +package com.amazonaws.services.lambda.samples.events.msk; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.LambdaLogger; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import org.apache.kafka.clients.producer.Producer; + +import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.*; + +import java.util.Map; +import java.util.UUID; + +/** + * Lambda function handler that produces AVRO messages to a Kafka topic + */ +public class AvroProducerHandler implements RequestHandler, String> { + + private final Gson gson = new GsonBuilder().setPrettyPrinting().create(); + + @Override + public String handleRequest(Map event, Context context) { + LambdaLogger logger = context.getLogger(); + logger.log("Received event: " + gson.toJson(event)); + + // Initialize counters for zip code distribution + int messageCount = 10; + int zip1000Count = 0; + int zip2000Count = 0; + + try { + // Get environment variables + String mskClusterArn = System.getenv("MSK_CLUSTER_ARN"); + String kafkaTopic = System.getenv("MSK_TOPIC"); + String schemaName = System.getenv("CONTACT_SCHEMA_NAME"); + String region = System.getenv("AWS_REGION"); + String registryName = System.getenv("REGISTRY_NAME") != null ? 
+ System.getenv("REGISTRY_NAME") : "default-registry"; + + if (mskClusterArn == null || kafkaTopic == null || schemaName == null) { + throw new RuntimeException("Required environment variables not set: MSK_CLUSTER_ARN, KAFKA_TOPIC, CONTACT_SCHEMA_NAME"); + } + + // Log that we're generating zip codes with different prefixes + logger.log("Generating contacts with zip codes starting with 1000 (50% chance) or 2000 (50% chance)"); + + // Create a Contact object from the input event or use default values + Contact contact = createContactFromEvent(event); + logger.log("Created contact: " + gson.toJson(contact)); + + // Get bootstrap brokers + String bootstrapBrokers = KafkaProducerHelper.getBootstrapBrokers(mskClusterArn); + logger.log("Using bootstrap brokers: " + bootstrapBrokers); + + // Log the topic name for debugging + logger.log("Target Kafka topic: '" + kafkaTopic + "'"); + + // Create Kafka producer with AWS Glue Schema Registry serializer + try (Producer producer = KafkaProducerHelper.createProducer( + bootstrapBrokers, region, registryName, schemaName)) { + + // Log producer configuration + logger.log("Created Kafka producer with AWS Glue Schema Registry serializer"); + logger.log("Registry name: " + registryName); + logger.log("Schema name: " + schemaName); + + // Send 10 messages + logger.log("Sending " + messageCount + " AVRO messages to topic: " + kafkaTopic); + + for (int i = 0; i < messageCount; i++) { + // Generate a random key for each message + String messageKey = UUID.randomUUID().toString(); + + // Create a new contact for each message to ensure variety + Contact messageContact = createContactFromEvent(event); + + // Print the contact details before sending (Contact is now a SpecificRecord) + logger.log("Sending contact #" + (i+1) + ": " + gson.toJson(messageContact)); + logger.log("AVRO record #" + (i+1) + ": " + messageContact.toString()); + + // Log the zip code prefix for distribution tracking + String zipCode = messageContact.getZip(); + if (zipCode != null && zipCode.length() >= 4) { + String prefix = zipCode.substring(0, 4); + logger.log("Contact #" + (i+1) + " zip code prefix: " + prefix); + + // Count zip codes by prefix + if ("1000".equals(prefix)) { + zip1000Count++; + } else if ("2000".equals(prefix)) { + zip2000Count++; + } + } + + // Send the message (Contact is now a SpecificRecord) + KafkaProducerHelper.sendAvroMessage(producer, kafkaTopic, messageKey, messageContact); + logger.log("Successfully sent AVRO message #" + (i+1) + " to topic: " + kafkaTopic); + } + + // Log summary of zip code distribution + logger.log("ZIP CODE DISTRIBUTION SUMMARY:"); + logger.log("Messages with zip code starting with 1000: " + zip1000Count); + logger.log("Messages with zip code starting with 2000: " + zip2000Count); + logger.log("Other zip code formats: " + (messageCount - zip1000Count - zip2000Count)); + } + + return "Successfully sent " + messageCount + " AVRO messages to Kafka topic: " + kafkaTopic + + " (Zip codes: " + zip1000Count + " with prefix 1000, " + zip2000Count + " with prefix 2000)"; + } catch (Exception e) { + logger.log("Error sending AVRO message: " + e.getMessage()); + e.printStackTrace(); + throw new RuntimeException("Failed to send AVRO message: " + e.getMessage(), e); + } + } + + /** + * Create a Contact object from the input event or use default values + * + * @param event Input event map + * @return Contact object + */ + private Contact createContactFromEvent(Map event) { + Contact contact = new Contact(); + + // Set fields from event if available, 
+        // Set fields from event if available, otherwise use default values
+        contact.setFirstname(getStringValue(event, "firstname", "FirstName-" + randomSuffix()));
+        contact.setLastname(getStringValue(event, "lastname", "LastName-" + randomSuffix()));
+        contact.setCompany(getStringValue(event, "company", "Company-" + randomSuffix()));
+        contact.setStreet(getStringValue(event, "street", "123 Main St"));
+        contact.setCity(getStringValue(event, "city", "AnyCity"));
+        contact.setCounty(getStringValue(event, "county", "AnyCounty"));
+        contact.setState(getStringValue(event, "state", "AnyState"));
+
+        // Generate zip code starting with 1000 50% of the time and 2000 the other 50%
+        if (event.containsKey("zip") && event.get("zip") != null) {
+            // If zip is provided in the event, use it as is
+            contact.setZip(event.get("zip").toString());
+        } else {
+            // 50% chance for each prefix
+            String prefix = Math.random() < 0.5 ? "1000" : "2000";
+            contact.setZip(prefix + randomDigit());
+        }
+
+        contact.setHomePhone(getStringValue(event, "homePhone", "555-123-" + randomDigits(4)));
+        contact.setCellPhone(getStringValue(event, "cellPhone", "555-456-" + randomDigits(4)));
+        contact.setEmail(getStringValue(event, "email", "user-" + randomSuffix() + "@example.com"));
+        contact.setWebsite(getStringValue(event, "website", "https://www." + randomSuffix() + ".com"));
+
+        return contact;
+    }
+
+    /**
+     * Get a string value from the event map, or return a default value if not found
+     *
+     * @param event Input event map
+     * @param key Key to look for
+     * @param defaultValue Default value to return if key not found
+     * @return String value
+     */
+    private String getStringValue(Map<String, Object> event, String key, String defaultValue) {
+        if (event.containsKey(key) && event.get(key) != null) {
+            return event.get(key).toString();
+        }
+        return defaultValue;
+    }
+
+    /**
+     * Generate a random suffix for default values
+     *
+     * @return Random string
+     */
+    private String randomSuffix() {
+        return UUID.randomUUID().toString().substring(0, 8);
+    }
+
+    /**
+     * Generate a random digit
+     *
+     * @return Random digit as string
+     */
+    private String randomDigit() {
+        return Integer.toString((int) (Math.random() * 10));
+    }
+
+    /**
+     * Generate random digits of specified length
+     *
+     * @param length Number of digits to generate
+     * @return Random digits as string
+     */
+    private String randomDigits(int length) {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < length; i++) {
+            sb.append(randomDigit());
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Get schema definition from AWS Glue Schema Registry
+     *
+     * @param region AWS region
+     * @param registryName Registry name
+     * @param schemaName Schema name
+     * @return Schema definition as a string
+     */
+    private String getSchemaDefinitionFromRegistry(String region, String registryName, String schemaName) {
+        try {
+            // Create Glue client with explicit HTTP client
+            GlueClient glueClient = GlueClient.builder()
+                    .httpClientBuilder(UrlConnectionHttpClient.builder())
+                    .region(software.amazon.awssdk.regions.Region.of(region))
+                    .build();
+
+            // Get schema definition
+            GetSchemaVersionRequest request = GetSchemaVersionRequest.builder()
+                    .schemaId(SchemaId.builder()
+                            .registryName(registryName)
+                            .schemaName(schemaName)
+                            .build())
+                    .schemaVersionNumber(SchemaVersionNumber.builder().latestVersion(true).build())
+                    .build();
+
+            GetSchemaVersionResponse response = glueClient.getSchemaVersion(request);
+            String schemaVersionId = response.schemaVersionId();
+            String schemaDefinition = response.schemaDefinition();
+
+            System.out.println("Retrieved 
schema version ID: " + schemaVersionId); + System.out.println("Retrieved schema definition: " + schemaDefinition); + + return schemaDefinition; + } catch (Exception e) { + throw new RuntimeException("Failed to get schema definition from registry: " + e.getMessage(), e); + } + } +} diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroSchemaHelper.java b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroSchemaHelper.java new file mode 100644 index 000000000..86cf6041f --- /dev/null +++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/AvroSchemaHelper.java @@ -0,0 +1,211 @@ +package com.amazonaws.services.lambda.samples.events.msk; + +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.BinaryEncoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DatumWriter; +import org.apache.avro.io.DecoderFactory; +import org.apache.avro.io.EncoderFactory; +import org.apache.avro.specific.SpecificDatumWriter; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; +import software.amazon.awssdk.services.glue.model.SchemaId; +import software.amazon.awssdk.services.glue.model.SchemaVersionNumber; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.UUID; + +/** + * Helper class for working with AVRO schemas from AWS Glue Schema Registry + */ +public class AvroSchemaHelper { + + // Schema registry constants + private static final byte HEADER_VERSION_BYTE = 0x00; + + /** + * Get schema definition from AWS Glue Schema Registry + * + * @param schemaName Name of the schema in Glue Schema Registry + * @return Schema definition as a string + */ + public static String getSchemaDefinition(String schemaName) { + try (GlueClient glueClient = GlueClient.builder().build()) { + GetSchemaVersionRequest request = GetSchemaVersionRequest.builder() + .schemaId(SchemaId.builder() + .registryName("default-registry") + .schemaName(schemaName) + .build()) + .schemaVersionNumber(SchemaVersionNumber.builder().latestVersion(true).build()) + .build(); + + GetSchemaVersionResponse response = glueClient.getSchemaVersion(request); + return response.schemaDefinition(); + } catch (Exception e) { + throw new RuntimeException("Failed to get schema definition: " + e.getMessage(), e); + } + } + + /** + * Get schema version ID from AWS Glue Schema Registry + * + * @param schemaName Name of the schema in Glue Schema Registry + * @return Schema version ID as UUID + */ + public static UUID getSchemaVersionId(String schemaName) { + try (GlueClient glueClient = GlueClient.builder().build()) { + GetSchemaVersionRequest request = GetSchemaVersionRequest.builder() + .schemaId(SchemaId.builder() + .registryName("default-registry") + .schemaName(schemaName) + .build()) + .schemaVersionNumber(SchemaVersionNumber.builder().latestVersion(true).build()) + .build(); + + GetSchemaVersionResponse response = glueClient.getSchemaVersion(request); + String schemaVersionId = response.schemaVersionId(); + + // Print 
the original UUID schema ID
+            System.out.println("Retrieved schema version ID (UUID): " + schemaVersionId);
+
+            // Return the actual UUID
+            return UUID.fromString(schemaVersionId);
+        } catch (Exception e) {
+            throw new RuntimeException("Failed to get schema version ID: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Create an AVRO record from a Contact object using the schema from Glue Schema Registry
+     *
+     * @param schemaDefinition AVRO schema definition
+     * @param contact Contact object to convert to AVRO
+     * @param schemaId Schema ID to include in the header
+     * @return AVRO record as byte array with schema registry header
+     */
+    public static byte[] createAvroRecord(String schemaDefinition, Contact contact, UUID schemaId) {
+        try {
+            Schema schema = new Schema.Parser().parse(schemaDefinition);
+            GenericRecord avroRecord = new GenericData.Record(schema);
+
+            // Populate the record with data from the Contact object
+            avroRecord.put("firstname", contact.getFirstname());
+            avroRecord.put("lastname", contact.getLastname());
+            avroRecord.put("company", contact.getCompany());
+            avroRecord.put("street", contact.getStreet());
+            avroRecord.put("city", contact.getCity());
+            avroRecord.put("county", contact.getCounty());
+            avroRecord.put("state", contact.getState());
+            avroRecord.put("zip", contact.getZip());
+            avroRecord.put("homePhone", contact.getHomePhone());
+            avroRecord.put("cellPhone", contact.getCellPhone());
+            avroRecord.put("email", contact.getEmail());
+            avroRecord.put("website", contact.getWebsite());
+
+            // Serialize the record to a byte array
+            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+            BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(outputStream, null);
+            DatumWriter<GenericRecord> datumWriter = new SpecificDatumWriter<>(schema);
+            datumWriter.write(avroRecord, encoder);
+            encoder.flush();
+
+            // Add schema registry header with schema ID
+            byte[] avroData = outputStream.toByteArray();
+            return addSchemaRegistryHeader(avroData, schemaId);
+        } catch (IOException e) {
+            throw new RuntimeException("Failed to create AVRO record: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Add schema registry header to AVRO data
+     *
+     * @param avroData AVRO serialized data
+     * @param schemaId Schema ID as UUID
+     * @return AVRO data with schema registry header
+     */
+    private static byte[] addSchemaRegistryHeader(byte[] avroData, UUID schemaId) {
+        // Schema registry header format:
+        // Byte 0: Magic byte (0x00)
+        // Bytes 1-16: UUID (16 bytes)
+        ByteBuffer buffer = ByteBuffer.allocate(1 + 16 + avroData.length);
+        buffer.put((byte) 0x00); // Magic byte
+
+        // Add UUID bytes (16 bytes)
+        buffer.putLong(schemaId.getMostSignificantBits());
+        buffer.putLong(schemaId.getLeastSignificantBits());
+
+        // Add AVRO data
+        buffer.put(avroData);
+
+        System.out.println("Added schema registry header with UUID: " + schemaId);
+        System.out.println("First 17 bytes of message (hex): " + bytesToHex(buffer.array(), 17));
+
+        return buffer.array();
+    }
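+    // Resulting wire layout (this sample's own framing, produced by the method
+    // above and consumed by parseAvroMessage below):
+    //   byte  0     : magic byte 0x00
+    //   bytes 1..8  : most significant 64 bits of the schema version UUID
+    //   bytes 9..16 : least significant 64 bits of the schema version UUID
+    //   bytes 17..  : Avro binary-encoded record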
+    /**
+     * Parse an AVRO message with schema registry header
+     *
+     * @param avroMessage Complete AVRO message with schema registry header
+     * @param schemaDefinition AVRO schema definition
+     * @return Parsed GenericRecord
+     */
+    public static GenericRecord parseAvroMessage(byte[] avroMessage, String schemaDefinition) {
+        try {
+            // Check if this is a valid message with schema registry header
+            if (avroMessage.length <= 17 || avroMessage[0] != 0x00) {
+                throw new IllegalArgumentException("Invalid AVRO message format: missing or invalid schema registry header");
+            }
+
+            // Extract the AVRO data (skip the 17-byte header)
+            byte[] avroData = Arrays.copyOfRange(avroMessage, 17, avroMessage.length);
+
+            // Parse the schema
+            Schema schema = new Schema.Parser().parse(schemaDefinition);
+
+            // Create a datum reader for the schema
+            DatumReader<GenericRecord> datumReader = new GenericDatumReader<>(schema);
+
+            // Create a decoder for the AVRO data
+            org.apache.avro.io.Decoder decoder = DecoderFactory.get().binaryDecoder(avroData, null);
+
+            // Read the record
+            return datumReader.read(null, decoder);
+        } catch (Exception e) {
+            throw new RuntimeException("Failed to parse AVRO message: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Convert byte array to hexadecimal string
+     *
+     * @param bytes Byte array to convert
+     * @param maxLength Maximum number of bytes to convert (0 for all)
+     * @return Hexadecimal string
+     */
+    private static String bytesToHex(byte[] bytes, int maxLength) {
+        StringBuilder sb = new StringBuilder();
+        int length = maxLength > 0 && maxLength < bytes.length ? maxLength : bytes.length;
+
+        for (int i = 0; i < length; i++) {
+            sb.append(String.format("%02X", bytes[i]));
+            if (i % 4 == 3 && i < length - 1) {
+                sb.append(" ");
+            }
+        }
+
+        if (maxLength > 0 && length < bytes.length) {
+            sb.append("...");
+        }
+
+        return sb.toString();
+    }
+}
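Before the generated Contact class, it is worth seeing how the two halves of AvroSchemaHelper compose. The sketch below is not part of the diff; the class name RoundTripDemo and the randomly generated schema-version UUID are illustrative stand-ins, and the Contact and AvroSchemaHelper classes from this sample are assumed to be in the same package. It frames a Contact with createAvroRecord and reads it back with parseAvroMessage:

```java
import java.util.UUID;
import org.apache.avro.generic.GenericRecord;

public class RoundTripDemo {
    public static void main(String[] args) {
        // Any valid Contact schema definition works; here we reuse the generated one.
        String schemaDefinition = Contact.getClassSchema().toString();
        UUID schemaVersionId = UUID.randomUUID();  // stand-in for a real Glue schema version id

        Contact contact = new Contact();
        contact.setFirstname("Jane");
        contact.setZip("10001");

        // Frame: 0x00 magic byte + 16-byte UUID + Avro binary body
        byte[] framed = AvroSchemaHelper.createAvroRecord(schemaDefinition, contact, schemaVersionId);

        // Strip the 17-byte header and decode the Avro body back into a record
        GenericRecord decoded = AvroSchemaHelper.parseAvroMessage(framed, schemaDefinition);
        System.out.println(decoded.get("firstname") + " / " + decoded.get("zip"));  // Jane / 10001
    }
}
```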
diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/Contact.java b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/Contact.java
new file mode 100644
index 000000000..3204f2bca
--- /dev/null
+++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/Contact.java
@@ -0,0 +1,1391 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
+package com.amazonaws.services.lambda.samples.events.msk;
+
+import org.apache.avro.generic.GenericArray;
+import org.apache.avro.specific.SpecificData;
+import org.apache.avro.util.Utf8;
+import org.apache.avro.message.BinaryMessageEncoder;
+import org.apache.avro.message.BinaryMessageDecoder;
+import org.apache.avro.message.SchemaStore;
+
+@org.apache.avro.specific.AvroGenerated
+public class Contact extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  private static final long serialVersionUID = -4035028153225992319L;
+
+  public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Contact\",\"namespace\":\"com.amazonaws.services.lambda.samples.events.msk\",\"fields\":[{\"name\":\"firstname\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"lastname\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"company\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"street\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"city\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"county\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"state\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"zip\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"homePhone\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"cellPhone\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"email\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null},{\"name\":\"website\",\"type\":[\"null\",{\"type\":\"string\",\"avro.java.string\":\"String\"}],\"default\":null}]}");
+  public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
+
+  private static final SpecificData MODEL$ = new SpecificData();
+
+  private static final BinaryMessageEncoder<Contact> ENCODER =
+      new BinaryMessageEncoder<>(MODEL$, SCHEMA$);
+
+  private static final BinaryMessageDecoder<Contact> DECODER =
+      new BinaryMessageDecoder<>(MODEL$, SCHEMA$);
+
+  /**
+   * Return the BinaryMessageEncoder instance used by this class.
+   * @return the message encoder used by this class
+   */
+  public static BinaryMessageEncoder<Contact> getEncoder() {
+    return ENCODER;
+  }
+
+  /**
+   * Return the BinaryMessageDecoder instance used by this class.
+   * @return the message decoder used by this class
+   */
+  public static BinaryMessageDecoder<Contact> getDecoder() {
+    return DECODER;
+  }
+
+  /**
+   * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}.
+   * @param resolver a {@link SchemaStore} used to find schemas by fingerprint
+   * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore
+   */
+  public static BinaryMessageDecoder<Contact> createDecoder(SchemaStore resolver) {
+    return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver);
+  }
+
+  /**
+   * Serializes this Contact to a ByteBuffer.
+   * @return a buffer holding the serialized data for this instance
+   * @throws java.io.IOException if this instance could not be serialized
+   */
+  public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException {
+    return ENCODER.encode(this);
+  }
+
+  /**
+   * Deserializes a Contact from a ByteBuffer.
+ * @param b a byte buffer holding serialized data for an instance of this class + * @return a Contact instance decoded from the given buffer + * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class + */ + public static Contact fromByteBuffer( + java.nio.ByteBuffer b) throws java.io.IOException { + return DECODER.decode(b); + } + + private java.lang.String firstname; + private java.lang.String lastname; + private java.lang.String company; + private java.lang.String street; + private java.lang.String city; + private java.lang.String county; + private java.lang.String state; + private java.lang.String zip; + private java.lang.String homePhone; + private java.lang.String cellPhone; + private java.lang.String email; + private java.lang.String website; + + /** + * Default constructor. Note that this does not initialize fields + * to their default values from the schema. If that is desired then + * one should use newBuilder(). + */ + public Contact() {} + + /** + * All-args constructor. + * @param firstname The new value for firstname + * @param lastname The new value for lastname + * @param company The new value for company + * @param street The new value for street + * @param city The new value for city + * @param county The new value for county + * @param state The new value for state + * @param zip The new value for zip + * @param homePhone The new value for homePhone + * @param cellPhone The new value for cellPhone + * @param email The new value for email + * @param website The new value for website + */ + public Contact(java.lang.String firstname, java.lang.String lastname, java.lang.String company, java.lang.String street, java.lang.String city, java.lang.String county, java.lang.String state, java.lang.String zip, java.lang.String homePhone, java.lang.String cellPhone, java.lang.String email, java.lang.String website) { + this.firstname = firstname; + this.lastname = lastname; + this.company = company; + this.street = street; + this.city = city; + this.county = county; + this.state = state; + this.zip = zip; + this.homePhone = homePhone; + this.cellPhone = cellPhone; + this.email = email; + this.website = website; + } + + @Override + public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } + + @Override + public org.apache.avro.Schema getSchema() { return SCHEMA$; } + + // Used by DatumWriter. Applications should not call. + @Override + public java.lang.Object get(int field$) { + switch (field$) { + case 0: return firstname; + case 1: return lastname; + case 2: return company; + case 3: return street; + case 4: return city; + case 5: return county; + case 6: return state; + case 7: return zip; + case 8: return homePhone; + case 9: return cellPhone; + case 10: return email; + case 11: return website; + default: throw new IndexOutOfBoundsException("Invalid index: " + field$); + } + } + + // Used by DatumReader. Applications should not call. + @Override + @SuppressWarnings(value="unchecked") + public void put(int field$, java.lang.Object value$) { + switch (field$) { + case 0: firstname = value$ != null ? value$.toString() : null; break; + case 1: lastname = value$ != null ? value$.toString() : null; break; + case 2: company = value$ != null ? value$.toString() : null; break; + case 3: street = value$ != null ? value$.toString() : null; break; + case 4: city = value$ != null ? value$.toString() : null; break; + case 5: county = value$ != null ? value$.toString() : null; break; + case 6: state = value$ != null ? 
value$.toString() : null; break; + case 7: zip = value$ != null ? value$.toString() : null; break; + case 8: homePhone = value$ != null ? value$.toString() : null; break; + case 9: cellPhone = value$ != null ? value$.toString() : null; break; + case 10: email = value$ != null ? value$.toString() : null; break; + case 11: website = value$ != null ? value$.toString() : null; break; + default: throw new IndexOutOfBoundsException("Invalid index: " + field$); + } + } + + /** + * Gets the value of the 'firstname' field. + * @return The value of the 'firstname' field. + */ + public java.lang.String getFirstname() { + return firstname; + } + + + /** + * Sets the value of the 'firstname' field. + * @param value the value to set. + */ + public void setFirstname(java.lang.String value) { + this.firstname = value; + } + + /** + * Gets the value of the 'lastname' field. + * @return The value of the 'lastname' field. + */ + public java.lang.String getLastname() { + return lastname; + } + + + /** + * Sets the value of the 'lastname' field. + * @param value the value to set. + */ + public void setLastname(java.lang.String value) { + this.lastname = value; + } + + /** + * Gets the value of the 'company' field. + * @return The value of the 'company' field. + */ + public java.lang.String getCompany() { + return company; + } + + + /** + * Sets the value of the 'company' field. + * @param value the value to set. + */ + public void setCompany(java.lang.String value) { + this.company = value; + } + + /** + * Gets the value of the 'street' field. + * @return The value of the 'street' field. + */ + public java.lang.String getStreet() { + return street; + } + + + /** + * Sets the value of the 'street' field. + * @param value the value to set. + */ + public void setStreet(java.lang.String value) { + this.street = value; + } + + /** + * Gets the value of the 'city' field. + * @return The value of the 'city' field. + */ + public java.lang.String getCity() { + return city; + } + + + /** + * Sets the value of the 'city' field. + * @param value the value to set. + */ + public void setCity(java.lang.String value) { + this.city = value; + } + + /** + * Gets the value of the 'county' field. + * @return The value of the 'county' field. + */ + public java.lang.String getCounty() { + return county; + } + + + /** + * Sets the value of the 'county' field. + * @param value the value to set. + */ + public void setCounty(java.lang.String value) { + this.county = value; + } + + /** + * Gets the value of the 'state' field. + * @return The value of the 'state' field. + */ + public java.lang.String getState() { + return state; + } + + + /** + * Sets the value of the 'state' field. + * @param value the value to set. + */ + public void setState(java.lang.String value) { + this.state = value; + } + + /** + * Gets the value of the 'zip' field. + * @return The value of the 'zip' field. + */ + public java.lang.String getZip() { + return zip; + } + + + /** + * Sets the value of the 'zip' field. + * @param value the value to set. + */ + public void setZip(java.lang.String value) { + this.zip = value; + } + + /** + * Gets the value of the 'homePhone' field. + * @return The value of the 'homePhone' field. + */ + public java.lang.String getHomePhone() { + return homePhone; + } + + + /** + * Sets the value of the 'homePhone' field. + * @param value the value to set. + */ + public void setHomePhone(java.lang.String value) { + this.homePhone = value; + } + + /** + * Gets the value of the 'cellPhone' field. 
+   * @return The value of the 'cellPhone' field.
+   */
+  public java.lang.String getCellPhone() {
+    return cellPhone;
+  }
+
+
+  /**
+   * Sets the value of the 'cellPhone' field.
+   * @param value the value to set.
+   */
+  public void setCellPhone(java.lang.String value) {
+    this.cellPhone = value;
+  }
+
+  /**
+   * Gets the value of the 'email' field.
+   * @return The value of the 'email' field.
+   */
+  public java.lang.String getEmail() {
+    return email;
+  }
+
+
+  /**
+   * Sets the value of the 'email' field.
+   * @param value the value to set.
+   */
+  public void setEmail(java.lang.String value) {
+    this.email = value;
+  }
+
+  /**
+   * Gets the value of the 'website' field.
+   * @return The value of the 'website' field.
+   */
+  public java.lang.String getWebsite() {
+    return website;
+  }
+
+
+  /**
+   * Sets the value of the 'website' field.
+   * @param value the value to set.
+   */
+  public void setWebsite(java.lang.String value) {
+    this.website = value;
+  }
+
+  /**
+   * Creates a new Contact RecordBuilder.
+   * @return A new Contact RecordBuilder
+   */
+  public static com.amazonaws.services.lambda.samples.events.msk.Contact.Builder newBuilder() {
+    return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder();
+  }
+
+  /**
+   * Creates a new Contact RecordBuilder by copying an existing Builder.
+   * @param other The existing builder to copy.
+   * @return A new Contact RecordBuilder
+   */
+  public static com.amazonaws.services.lambda.samples.events.msk.Contact.Builder newBuilder(com.amazonaws.services.lambda.samples.events.msk.Contact.Builder other) {
+    if (other == null) {
+      return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder();
+    } else {
+      return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(other);
+    }
+  }
+
+  /**
+   * Creates a new Contact RecordBuilder by copying an existing Contact instance.
+   * @param other The existing instance to copy.
+   * @return A new Contact RecordBuilder
+   */
+  public static com.amazonaws.services.lambda.samples.events.msk.Contact.Builder newBuilder(com.amazonaws.services.lambda.samples.events.msk.Contact other) {
+    if (other == null) {
+      return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder();
+    } else {
+      return new com.amazonaws.services.lambda.samples.events.msk.Contact.Builder(other);
+    }
+  }
+
+  /**
+   * RecordBuilder for Contact instances.
+   */
+  @org.apache.avro.specific.AvroGenerated
+  public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<Contact.Builder>
+    implements org.apache.avro.data.RecordBuilder<Contact> {
+
+    private java.lang.String firstname;
+    private java.lang.String lastname;
+    private java.lang.String company;
+    private java.lang.String street;
+    private java.lang.String city;
+    private java.lang.String county;
+    private java.lang.String state;
+    private java.lang.String zip;
+    private java.lang.String homePhone;
+    private java.lang.String cellPhone;
+    private java.lang.String email;
+    private java.lang.String website;
+
+    /** Creates a new Builder */
+    private Builder() {
+      super(SCHEMA$, MODEL$);
+    }
+
+    /**
+     * Creates a Builder by copying an existing Builder.
+     * @param other The existing Builder to copy.
+     */
+    private Builder(com.amazonaws.services.lambda.samples.events.msk.Contact.Builder other) {
+      super(other);
+      if (isValidValue(fields()[0], other.firstname)) {
+        this.firstname = data().deepCopy(fields()[0].schema(), other.firstname);
+        fieldSetFlags()[0] = other.fieldSetFlags()[0];
+      }
+      if (isValidValue(fields()[1], other.lastname)) {
+        this.lastname = data().deepCopy(fields()[1].schema(), other.lastname);
+        fieldSetFlags()[1] = other.fieldSetFlags()[1];
+      }
+      if (isValidValue(fields()[2], other.company)) {
+        this.company = data().deepCopy(fields()[2].schema(), other.company);
+        fieldSetFlags()[2] = other.fieldSetFlags()[2];
+      }
+      if (isValidValue(fields()[3], other.street)) {
+        this.street = data().deepCopy(fields()[3].schema(), other.street);
+        fieldSetFlags()[3] = other.fieldSetFlags()[3];
+      }
+      if (isValidValue(fields()[4], other.city)) {
+        this.city = data().deepCopy(fields()[4].schema(), other.city);
+        fieldSetFlags()[4] = other.fieldSetFlags()[4];
+      }
+      if (isValidValue(fields()[5], other.county)) {
+        this.county = data().deepCopy(fields()[5].schema(), other.county);
+        fieldSetFlags()[5] = other.fieldSetFlags()[5];
+      }
+      if (isValidValue(fields()[6], other.state)) {
+        this.state = data().deepCopy(fields()[6].schema(), other.state);
+        fieldSetFlags()[6] = other.fieldSetFlags()[6];
+      }
+      if (isValidValue(fields()[7], other.zip)) {
+        this.zip = data().deepCopy(fields()[7].schema(), other.zip);
+        fieldSetFlags()[7] = other.fieldSetFlags()[7];
+      }
+      if (isValidValue(fields()[8], other.homePhone)) {
+        this.homePhone = data().deepCopy(fields()[8].schema(), other.homePhone);
+        fieldSetFlags()[8] = other.fieldSetFlags()[8];
+      }
+      if (isValidValue(fields()[9], other.cellPhone)) {
+        this.cellPhone = data().deepCopy(fields()[9].schema(), other.cellPhone);
+        fieldSetFlags()[9] = other.fieldSetFlags()[9];
+      }
+      if (isValidValue(fields()[10], other.email)) {
+        this.email = data().deepCopy(fields()[10].schema(), other.email);
+        fieldSetFlags()[10] = other.fieldSetFlags()[10];
+      }
+      if (isValidValue(fields()[11], other.website)) {
+        this.website = data().deepCopy(fields()[11].schema(), other.website);
+        fieldSetFlags()[11] = other.fieldSetFlags()[11];
+      }
+    }
+
+    /**
+     * Creates a Builder by copying an existing Contact instance
+     * @param other The existing instance to copy.
+     */
+    private Builder(com.amazonaws.services.lambda.samples.events.msk.Contact other) {
+      super(SCHEMA$, MODEL$);
+      if (isValidValue(fields()[0], other.firstname)) {
+        this.firstname = data().deepCopy(fields()[0].schema(), other.firstname);
+        fieldSetFlags()[0] = true;
+      }
+      if (isValidValue(fields()[1], other.lastname)) {
+        this.lastname = data().deepCopy(fields()[1].schema(), other.lastname);
+        fieldSetFlags()[1] = true;
+      }
+      if (isValidValue(fields()[2], other.company)) {
+        this.company = data().deepCopy(fields()[2].schema(), other.company);
+        fieldSetFlags()[2] = true;
+      }
+      if (isValidValue(fields()[3], other.street)) {
+        this.street = data().deepCopy(fields()[3].schema(), other.street);
+        fieldSetFlags()[3] = true;
+      }
+      if (isValidValue(fields()[4], other.city)) {
+        this.city = data().deepCopy(fields()[4].schema(), other.city);
+        fieldSetFlags()[4] = true;
+      }
+      if (isValidValue(fields()[5], other.county)) {
+        this.county = data().deepCopy(fields()[5].schema(), other.county);
+        fieldSetFlags()[5] = true;
+      }
+      if (isValidValue(fields()[6], other.state)) {
+        this.state = data().deepCopy(fields()[6].schema(), other.state);
+        fieldSetFlags()[6] = true;
+      }
+      if (isValidValue(fields()[7], other.zip)) {
+        this.zip = data().deepCopy(fields()[7].schema(), other.zip);
+        fieldSetFlags()[7] = true;
+      }
+      if (isValidValue(fields()[8], other.homePhone)) {
+        this.homePhone = data().deepCopy(fields()[8].schema(), other.homePhone);
+        fieldSetFlags()[8] = true;
+      }
+      if (isValidValue(fields()[9], other.cellPhone)) {
+        this.cellPhone = data().deepCopy(fields()[9].schema(), other.cellPhone);
+        fieldSetFlags()[9] = true;
+      }
+      if (isValidValue(fields()[10], other.email)) {
+        this.email = data().deepCopy(fields()[10].schema(), other.email);
+        fieldSetFlags()[10] = true;
+      }
+      if (isValidValue(fields()[11], other.website)) {
+        this.website = data().deepCopy(fields()[11].schema(), other.website);
+        fieldSetFlags()[11] = true;
+      }
+    }
+
+    /**
+     * Gets the value of the 'firstname' field.
+     * @return The value.
+     */
+    public java.lang.String getFirstname() {
+      return firstname;
+    }
+
+    /**
+     * Sets the value of the 'firstname' field.
+     * @param value The value of 'firstname'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setFirstname(java.lang.String value) {
+      validate(fields()[0], value);
+      this.firstname = value;
+      fieldSetFlags()[0] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'firstname' field has been set.
+     * @return True if the 'firstname' field has been set, false otherwise.
+     */
+    public boolean hasFirstname() {
+      return fieldSetFlags()[0];
+    }
+
+    /**
+     * Clears the value of the 'firstname' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearFirstname() {
+      firstname = null;
+      fieldSetFlags()[0] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'lastname' field.
+     * @return The value.
+     */
+    public java.lang.String getLastname() {
+      return lastname;
+    }
+
+    /**
+     * Sets the value of the 'lastname' field.
+     * @param value The value of 'lastname'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setLastname(java.lang.String value) {
+      validate(fields()[1], value);
+      this.lastname = value;
+      fieldSetFlags()[1] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'lastname' field has been set.
+     * @return True if the 'lastname' field has been set, false otherwise.
+     */
+    public boolean hasLastname() {
+      return fieldSetFlags()[1];
+    }
+
+    /**
+     * Clears the value of the 'lastname' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearLastname() {
+      lastname = null;
+      fieldSetFlags()[1] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'company' field.
+     * @return The value.
+     */
+    public java.lang.String getCompany() {
+      return company;
+    }
+
+    /**
+     * Sets the value of the 'company' field.
+     * @param value The value of 'company'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCompany(java.lang.String value) {
+      validate(fields()[2], value);
+      this.company = value;
+      fieldSetFlags()[2] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'company' field has been set.
+     * @return True if the 'company' field has been set, false otherwise.
+     */
+    public boolean hasCompany() {
+      return fieldSetFlags()[2];
+    }
+
+    /**
+     * Clears the value of the 'company' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCompany() {
+      company = null;
+      fieldSetFlags()[2] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'street' field.
+     * @return The value.
+     */
+    public java.lang.String getStreet() {
+      return street;
+    }
+
+    /**
+     * Sets the value of the 'street' field.
+     * @param value The value of 'street'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setStreet(java.lang.String value) {
+      validate(fields()[3], value);
+      this.street = value;
+      fieldSetFlags()[3] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'street' field has been set.
+     * @return True if the 'street' field has been set, false otherwise.
+     */
+    public boolean hasStreet() {
+      return fieldSetFlags()[3];
+    }
+
+    /**
+     * Clears the value of the 'street' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearStreet() {
+      street = null;
+      fieldSetFlags()[3] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'city' field.
+     * @return The value.
+     */
+    public java.lang.String getCity() {
+      return city;
+    }
+
+    /**
+     * Sets the value of the 'city' field.
+     * @param value The value of 'city'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCity(java.lang.String value) {
+      validate(fields()[4], value);
+      this.city = value;
+      fieldSetFlags()[4] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'city' field has been set.
+     * @return True if the 'city' field has been set, false otherwise.
+     */
+    public boolean hasCity() {
+      return fieldSetFlags()[4];
+    }
+
+    /**
+     * Clears the value of the 'city' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCity() {
+      city = null;
+      fieldSetFlags()[4] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'county' field.
+     * @return The value.
+     */
+    public java.lang.String getCounty() {
+      return county;
+    }
+
+    /**
+     * Sets the value of the 'county' field.
+     * @param value The value of 'county'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCounty(java.lang.String value) {
+      validate(fields()[5], value);
+      this.county = value;
+      fieldSetFlags()[5] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'county' field has been set.
+     * @return True if the 'county' field has been set, false otherwise.
+     */
+    public boolean hasCounty() {
+      return fieldSetFlags()[5];
+    }
+
+    /**
+     * Clears the value of the 'county' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCounty() {
+      county = null;
+      fieldSetFlags()[5] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'state' field.
+     * @return The value.
+     */
+    public java.lang.String getState() {
+      return state;
+    }
+
+    /**
+     * Sets the value of the 'state' field.
+     * @param value The value of 'state'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setState(java.lang.String value) {
+      validate(fields()[6], value);
+      this.state = value;
+      fieldSetFlags()[6] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'state' field has been set.
+     * @return True if the 'state' field has been set, false otherwise.
+     */
+    public boolean hasState() {
+      return fieldSetFlags()[6];
+    }
+
+    /**
+     * Clears the value of the 'state' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearState() {
+      state = null;
+      fieldSetFlags()[6] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'zip' field.
+     * @return The value.
+     */
+    public java.lang.String getZip() {
+      return zip;
+    }
+
+    /**
+     * Sets the value of the 'zip' field.
+     * @param value The value of 'zip'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setZip(java.lang.String value) {
+      validate(fields()[7], value);
+      this.zip = value;
+      fieldSetFlags()[7] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'zip' field has been set.
+     * @return True if the 'zip' field has been set, false otherwise.
+     */
+    public boolean hasZip() {
+      return fieldSetFlags()[7];
+    }
+
+    /**
+     * Clears the value of the 'zip' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearZip() {
+      zip = null;
+      fieldSetFlags()[7] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'homePhone' field.
+     * @return The value.
+     */
+    public java.lang.String getHomePhone() {
+      return homePhone;
+    }
+
+    /**
+     * Sets the value of the 'homePhone' field.
+     * @param value The value of 'homePhone'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setHomePhone(java.lang.String value) {
+      validate(fields()[8], value);
+      this.homePhone = value;
+      fieldSetFlags()[8] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'homePhone' field has been set.
+     * @return True if the 'homePhone' field has been set, false otherwise.
+     */
+    public boolean hasHomePhone() {
+      return fieldSetFlags()[8];
+    }
+
+    /**
+     * Clears the value of the 'homePhone' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearHomePhone() {
+      homePhone = null;
+      fieldSetFlags()[8] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'cellPhone' field.
+     * @return The value.
+     */
+    public java.lang.String getCellPhone() {
+      return cellPhone;
+    }
+
+    /**
+     * Sets the value of the 'cellPhone' field.
+     * @param value The value of 'cellPhone'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setCellPhone(java.lang.String value) {
+      validate(fields()[9], value);
+      this.cellPhone = value;
+      fieldSetFlags()[9] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'cellPhone' field has been set.
+     * @return True if the 'cellPhone' field has been set, false otherwise.
+     */
+    public boolean hasCellPhone() {
+      return fieldSetFlags()[9];
+    }
+
+    /**
+     * Clears the value of the 'cellPhone' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearCellPhone() {
+      cellPhone = null;
+      fieldSetFlags()[9] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'email' field.
+     * @return The value.
+     */
+    public java.lang.String getEmail() {
+      return email;
+    }
+
+    /**
+     * Sets the value of the 'email' field.
+     * @param value The value of 'email'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setEmail(java.lang.String value) {
+      validate(fields()[10], value);
+      this.email = value;
+      fieldSetFlags()[10] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'email' field has been set.
+     * @return True if the 'email' field has been set, false otherwise.
+     */
+    public boolean hasEmail() {
+      return fieldSetFlags()[10];
+    }
+
+    /**
+     * Clears the value of the 'email' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearEmail() {
+      email = null;
+      fieldSetFlags()[10] = false;
+      return this;
+    }
+
+    /**
+     * Gets the value of the 'website' field.
+     * @return The value.
+     */
+    public java.lang.String getWebsite() {
+      return website;
+    }
+
+    /**
+     * Sets the value of the 'website' field.
+     * @param value The value of 'website'.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder setWebsite(java.lang.String value) {
+      validate(fields()[11], value);
+      this.website = value;
+      fieldSetFlags()[11] = true;
+      return this;
+    }
+
+    /**
+     * Checks whether the 'website' field has been set.
+     * @return True if the 'website' field has been set, false otherwise.
+     */
+    public boolean hasWebsite() {
+      return fieldSetFlags()[11];
+    }
+
+    /**
+     * Clears the value of the 'website' field.
+     * @return This builder.
+     */
+    public com.amazonaws.services.lambda.samples.events.msk.Contact.Builder clearWebsite() {
+      website = null;
+      fieldSetFlags()[11] = false;
+      return this;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public Contact build() {
+      try {
+        Contact record = new Contact();
+        record.firstname = fieldSetFlags()[0] ? this.firstname : (java.lang.String) defaultValue(fields()[0]);
+        record.lastname = fieldSetFlags()[1] ? this.lastname : (java.lang.String) defaultValue(fields()[1]);
+        record.company = fieldSetFlags()[2] ? this.company : (java.lang.String) defaultValue(fields()[2]);
+        record.street = fieldSetFlags()[3] ? this.street : (java.lang.String) defaultValue(fields()[3]);
+        record.city = fieldSetFlags()[4] ? this.city : (java.lang.String) defaultValue(fields()[4]);
+        record.county = fieldSetFlags()[5] ? this.county : (java.lang.String) defaultValue(fields()[5]);
+        record.state = fieldSetFlags()[6] ? this.state : (java.lang.String) defaultValue(fields()[6]);
+        record.zip = fieldSetFlags()[7] ? this.zip : (java.lang.String) defaultValue(fields()[7]);
+        record.homePhone = fieldSetFlags()[8] ? this.homePhone : (java.lang.String) defaultValue(fields()[8]);
+        record.cellPhone = fieldSetFlags()[9] ? this.cellPhone : (java.lang.String) defaultValue(fields()[9]);
+        record.email = fieldSetFlags()[10] ? this.email : (java.lang.String) defaultValue(fields()[10]);
+        record.website = fieldSetFlags()[11] ? this.website : (java.lang.String) defaultValue(fields()[11]);
+        return record;
+      } catch (org.apache.avro.AvroMissingFieldException e) {
+        throw e;
+      } catch (java.lang.Exception e) {
+        throw new org.apache.avro.AvroRuntimeException(e);
+      }
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private static final org.apache.avro.io.DatumWriter<Contact>
+    WRITER$ = (org.apache.avro.io.DatumWriter<Contact>)MODEL$.createDatumWriter(SCHEMA$);
+
+  @Override public void writeExternal(java.io.ObjectOutput out)
+    throws java.io.IOException {
+    WRITER$.write(this, SpecificData.getEncoder(out));
+  }
+
+  @SuppressWarnings("unchecked")
+  private static final org.apache.avro.io.DatumReader<Contact>
+    READER$ = (org.apache.avro.io.DatumReader<Contact>)MODEL$.createDatumReader(SCHEMA$);
+
+  @Override public void readExternal(java.io.ObjectInput in)
+    throws java.io.IOException {
+    READER$.read(this, SpecificData.getDecoder(in));
+  }
+
+  @Override protected boolean hasCustomCoders() { return true; }
+
+  @Override public void customEncode(org.apache.avro.io.Encoder out)
+    throws java.io.IOException
+  {
+    if (this.firstname == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.firstname);
+    }
+
+    if (this.lastname == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.lastname);
+    }
+
+    if (this.company == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.company);
+    }
+
+    if (this.street == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.street);
+    }
+
+    if (this.city == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.city);
+    }
+
+    if (this.county == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.county);
+    }
+
+    if (this.state == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.state);
+    }
+
+    if (this.zip == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.zip);
+    }
+
+    if (this.homePhone == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.homePhone);
+    }
+
+    if (this.cellPhone == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.cellPhone);
+    }
+
+    if (this.email == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.email);
+    }
+
+    if (this.website == null) {
+      out.writeIndex(0);
+      out.writeNull();
+    } else {
+      out.writeIndex(1);
+      out.writeString(this.website);
+    }
+
+  }
+
+  @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
+    throws java.io.IOException
+  {
+    org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
+    if (fieldOrder == null) {
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.firstname = null;
+      } else {
+        this.firstname = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.lastname = null;
+      } else {
+        this.lastname = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.company = null;
+      } else {
+        this.company = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.street = null;
+      } else {
+        this.street = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.city = null;
+      } else {
+        this.city = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.county = null;
+      } else {
+        this.county = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.state = null;
+      } else {
+        this.state = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.zip = null;
+      } else {
+        this.zip = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.homePhone = null;
+      } else {
+        this.homePhone = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.cellPhone = null;
+      } else {
+        this.cellPhone = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.email = null;
+      } else {
+        this.email = in.readString();
+      }
+
+      if (in.readIndex() != 1) {
+        in.readNull();
+        this.website = null;
+      } else {
+        this.website = in.readString();
+      }
+
+    } else {
+      for (int i = 0; i < 12; i++) {
+        switch (fieldOrder[i].pos()) {
+        case 0:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.firstname = null;
+          } else {
+            this.firstname = in.readString();
+          }
+          break;
+
+        case 1:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.lastname = null;
+          } else {
+            this.lastname = in.readString();
+          }
+          break;
+
+        case 2:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.company = null;
+          } else {
+            this.company = in.readString();
+          }
+          break;
+
+        case 3:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.street = null;
+          } else {
+            this.street = in.readString();
+          }
+          break;
+
+        case 4:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.city = null;
+          } else {
+            this.city = in.readString();
+          }
+          break;
+
+        case 5:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.county = null;
+          } else {
+            this.county = in.readString();
+          }
+          break;
+
+        case 6:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.state = null;
+          } else {
+            this.state = in.readString();
+          }
+          break;
+
+        case 7:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.zip = null;
+          } else {
+            this.zip = in.readString();
+          }
+          break;
+
+        case 8:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.homePhone = null;
+          } else {
+            this.homePhone = in.readString();
+          }
+          break;
+
+        case 9:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.cellPhone = null;
+          } else {
+            this.cellPhone = in.readString();
+          }
+          break;
+
+        case 10:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.email = null;
+          } else {
+            this.email = in.readString();
+          }
+          break;
+
+        case 11:
+          if (in.readIndex() != 1) {
+            in.readNull();
+            this.website = null;
+          } else {
+            this.website = in.readString();
+          }
+          break;
+
+        default:
+          throw new java.io.IOException("Corrupt ResolvingDecoder.");
+        }
+      }
+    }
+  }
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaProducerHelper.java b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaProducerHelper.java
new file mode 100644
index 000000000..b0407ee82
--- /dev/null
+++ b/msk-lambda-schema-avro-java-sam/kafka_event_producer_function/src/main/java/com/amazonaws/services/lambda/samples/events/msk/KafkaProducerHelper.java
@@ -0,0 +1,115 @@
+package com.amazonaws.services.lambda.samples.events.msk;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringSerializer;
+import software.amazon.awssdk.services.kafka.KafkaClient;
+import software.amazon.awssdk.services.kafka.model.GetBootstrapBrokersRequest;
+import software.amazon.awssdk.services.kafka.model.GetBootstrapBrokersResponse;
+import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient;
+
+import com.amazonaws.services.schemaregistry.serializers.avro.AWSKafkaAvroSerializer;
+import com.amazonaws.services.schemaregistry.utils.AWSSchemaRegistryConstants;
+import com.amazonaws.services.schemaregistry.utils.AvroRecordType;
+
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Helper class for producing messages to Kafka
+ */
+public class KafkaProducerHelper {
+
+    /**
+     * Get bootstrap brokers for an MSK cluster
+     *
+     * @param clusterArn ARN of the MSK cluster
+     * @return Bootstrap brokers string
+     */
+    public static String getBootstrapBrokers(String clusterArn) {
+        try {
+            // Explicitly specify the HTTP client implementation
+            KafkaClient kafkaClient = KafkaClient.builder()
+                    .httpClientBuilder(UrlConnectionHttpClient.builder())
+                    .build();
+
+            GetBootstrapBrokersRequest request = GetBootstrapBrokersRequest.builder()
+                    .clusterArn(clusterArn)
+                    .build();
+
+            GetBootstrapBrokersResponse response = kafkaClient.getBootstrapBrokers(request);
+            return response.bootstrapBrokerStringSaslIam();
+        } catch (Exception e) {
+            throw new RuntimeException("Failed to get bootstrap brokers: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Create a Kafka producer configured for IAM authentication and AWS Glue Schema Registry
+     *
+     * @param bootstrapServers Bootstrap servers string
+     * @param region AWS region
+     * @param registryName Schema registry name
+     * @param schemaName Schema name
+     * @return Configured Kafka producer
+     */
+    public static Producer<String, Contact> createProducer(String bootstrapServers, String region,
+            String registryName, String schemaName) {
+        Properties props = new Properties();
+        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AWSKafkaAvroSerializer.class.getName());
+
+        // Configure IAM authentication
+        props.put("security.protocol", "SASL_SSL");
+        props.put("sasl.mechanism", "AWS_MSK_IAM");
+        props.put("sasl.jaas.config", "software.amazon.msk.auth.iam.IAMLoginModule required;");
+        props.put("sasl.client.callback.handler.class", "software.amazon.msk.auth.iam.IAMClientCallbackHandler");
+
+        // Configure AWS Glue Schema Registry
+        props.put(AWSSchemaRegistryConstants.AWS_REGION, region);
+        props.put(AWSSchemaRegistryConstants.REGISTRY_NAME, registryName);
+        props.put(AWSSchemaRegistryConstants.SCHEMA_NAME, schemaName);
+        props.put(AWSSchemaRegistryConstants.AVRO_RECORD_TYPE, AvroRecordType.SPECIFIC_RECORD.getName());
+        props.put(AWSSchemaRegistryConstants.SCHEMA_AUTO_REGISTRATION_SETTING, true);
+
+        // Additional producer configurations
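+        // acks=all waits for the full in-sync replica set to acknowledge each
+        // record and retries covers transient broker errors, giving
+        // at-least-once delivery; max.block.ms and request.timeout.ms are
+        // generous because IAM authentication and the first schema-registry
+        // lookup can make the initial send noticeably slower than steady state.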
+        props.put(ProducerConfig.ACKS_CONFIG, "all");
+        props.put(ProducerConfig.RETRIES_CONFIG, 3);
+        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 120000); // 2 minutes
+        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000); // 1 minute
+
+        return new KafkaProducer<>(props);
+    }
+
+    /**
+     * Send an AVRO message to a Kafka topic
+     *
+     * @param producer Kafka producer
+     * @param topic Topic name
+     * @param key Message key (can be null)
+     * @param contact Contact object (SpecificRecord)
+     * @throws ExecutionException If sending fails
+     * @throws InterruptedException If sending is interrupted
+     */
+    public static void sendAvroMessage(Producer<String, Contact> producer, String topic, String key, Contact contact)
+            throws ExecutionException, InterruptedException {
+        try {
+            // Print Contact details before sending
+            System.out.println("Sending AVRO message to topic: '" + topic + "'");
+            System.out.println("Message key: " + key);
+            System.out.println("Contact record: " + contact.toString());
+
+            // Create and send the record (Contact is now a SpecificRecord)
+            ProducerRecord<String, Contact> record = new ProducerRecord<>(topic, key, contact);
+            producer.send(record).get(); // Using get() to make it synchronous
+            System.out.println("Successfully sent AVRO message to topic: " + topic);
+        } catch (Exception e) {
+            System.err.println("Error sending message to topic '" + topic + "': " + e.getMessage());
+            e.printStackTrace();
+            throw e;
+        }
+    }
+}
diff --git a/msk-lambda-schema-avro-java-sam/template.yaml b/msk-lambda-schema-avro-java-sam/template.yaml
new file mode 100644
index 000000000..c416c991d
--- /dev/null
+++ b/msk-lambda-schema-avro-java-sam/template.yaml
@@ -0,0 +1,233 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+Description: >
+  kafka_event_consumer_and_producer_functions
+
+  Sample SAM Template for MSK consumer and AVRO producer with IAM auth
+
+# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
+Globals:
+  Function:
+    Timeout: 15
+
+Resources:
+  # SQS Queue to use as Dead Letter Queue for the MSK event source mapping
+  ConsumerDLQ:
+    Type: AWS::SQS::Queue
+    Properties:
+      MessageRetentionPeriod: 1209600 # 14 days (maximum retention period)
+      VisibilityTimeout: 300 # 5 minutes
+      Tags:
+        - Key: Purpose
+          Value: MSKConsumerDLQ
+
+  LambdaMSKConsumerJavaFunction:
+    Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction
+    Properties:
+      CodeUri: kafka_event_consumer_function
+      Handler: com.amazonaws.services.lambda.samples.events.msk.AvroKafkaHandler::handleRequest
+      Runtime: java21
+      Architectures:
+        - x86_64
+      MemorySize: 512
+      VpcConfig:
+        SecurityGroupIds: !Ref SecurityGroupIds
+        SubnetIds: !Ref SubnetIds
+      Environment: # More info about Env Vars: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#environment-object
+        Variables:
+          PARAM1: VALUE
+          JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1 # More info about tiered compilation https://aws.amazon.com/blogs/compute/optimizing-aws-lambda-function-performance-for-java/
+          POWERTOOLS_LOG_LEVEL: TRACE
+          POWERTOOLS_SERVICE_NAME: kafka_consumer
+      Events:
+        MSKEvent:
+          Type: MSK
+          Properties:
+            StartingPosition: LATEST
+            BatchSize: 1
+            MaximumBatchingWindowInSeconds: 1
+            Stream: !Join [ '', ["arn:", "aws:", "kafka:", !Ref "AWS::Region" , ":" ,!Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId] ]
+            Topics:
+              - !Ref MSKTopic
+            DestinationConfig:
+              OnFailure:
+                Destination: !GetAtt ConsumerDLQ.Arn
+            FilterCriteria:
+              Filters:
+                - Pattern: '{"value": {"zip": [ { "prefix": "2000" } ]}}'
+      Policies:
+        - Statement:
+            - Sid: KafkaClusterPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka-cluster:Connect
+                - kafka-cluster:DescribeGroup
+                - kafka-cluster:DescribeCluster
+                - kafka-cluster:AlterCluster
+                - kafka-cluster:AlterClusterDynamicConfiguration
+                - kafka-cluster:WriteDataIdempotently
+                - kafka-cluster:AlterGroup
+                - kafka-cluster:DescribeTopic
+                - kafka-cluster:ReadData
+                - kafka-cluster:DescribeClusterDynamicConfiguration
+              Resource:
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId]]
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "topic/", !Ref MSKClusterName, "/" , !Ref MSKClusterId, "/*"]]
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "group/", !Ref MSKClusterName, "/" , !Ref MSKClusterId, "/*"]]
+
+            - Sid: KafkaPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka:DescribeClusterV2
+                - kafka:GetBootstrapBrokers
+              Resource: '*'
+
+            - Sid: EC2PermissionsPolicy
+              Effect: Allow
+              Action:
+                - ec2:DescribeSecurityGroups
+                - ec2:DescribeSubnets
+                - ec2:DescribeVpcs
+                - ec2:CreateNetworkInterface
+                - ec2:DescribeNetworkInterfaces
+                - ec2:DeleteNetworkInterface
+              Resource: '*'
+
+            - Sid: GlueSchemaRegistryPermissionsPolicy
+              Effect: Allow
+              Action:
+                - glue:GetSchemaByDefinition
+                - glue:GetSchemaVersion
+                - glue:GetRegistry
+                - glue:ListSchemas
+                - glue:ListSchemaVersions
+                - glue:RegisterSchemaVersion
+                - glue:PutSchemaVersionMetadata
+                - glue:GetSchemaVersionsDiff
+                - glue:QuerySchemaVersionMetadata
+              Resource: '*'
+
+            - Sid: SQSPermissionsPolicy
+              Effect: Allow
+              Action:
+                - sqs:SendMessage
+              Resource: !GetAtt ConsumerDLQ.Arn
+        - VPCAccessPolicy: {}
+
+  LambdaMSKProducerJavaFunction:
+    Type: AWS::Serverless::Function
+    Properties:
+      CodeUri: kafka_event_producer_function
+      Handler: com.amazonaws.services.lambda.samples.events.msk.AvroProducerHandler::handleRequest
+      Runtime: java21
+      Timeout: 300
+      Architectures:
+        - x86_64
+      MemorySize: 512
+      VpcConfig:
+        SecurityGroupIds: !Ref SecurityGroupIds
+        SubnetIds: !Ref SubnetIds
+      Environment:
+        Variables:
+          MSK_CLUSTER_ARN: !Join [ '', ["arn:", "aws:", "kafka:", !Ref "AWS::Region" , ":" ,!Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId] ]
+          MSK_TOPIC: !Ref MSKTopic
+          REGISTRY_NAME: !Ref GlueSchemaRegistryName
+          CONTACT_SCHEMA_NAME: !Ref ContactSchemaName
+          JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+      Policies:
+        - Statement:
+            - Sid: KafkaClusterPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka-cluster:Connect
+                - kafka-cluster:DescribeCluster
+                - kafka-cluster:WriteData
+                - kafka-cluster:DescribeTopic
+              Resource:
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId]]
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "topic/", !Ref MSKClusterName, "/" , !Ref MSKClusterId, "/*"]]
+
+            - Sid: KafkaPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka:DescribeClusterV2
+                - kafka:GetBootstrapBrokers
+              Resource: '*'
+
+            - Sid: EC2PermissionsPolicy
+              Effect: Allow
+              Action:
+                - ec2:DescribeSecurityGroups
+                - ec2:DescribeSubnets
+                - ec2:DescribeVpcs
+                - ec2:CreateNetworkInterface
+                - ec2:DescribeNetworkInterfaces
+                - ec2:DeleteNetworkInterface
+              Resource: '*'
+
+            - Sid: GlueSchemaRegistryPermissionsPolicy
+              Effect: Allow
+              Action:
+                - glue:GetSchemaByDefinition
+                - glue:GetSchemaVersion
+                - glue:GetRegistry
+                - glue:ListSchemas
+                - glue:ListSchemaVersions
+                - glue:GetSchemaVersionsDiff
+                - glue:QuerySchemaVersionMetadata
+                - glue:RegisterSchemaVersion
+                - glue:PutSchemaVersionMetadata
+                - glue:CreateSchema
+                - glue:CreateRegistry
+              Resource:
+                - !Sub "arn:aws:glue:${AWS::Region}:${AWS::AccountId}:registry/*"
+                - !Sub "arn:aws:glue:${AWS::Region}:${AWS::AccountId}:schema/*/*"
+        - VPCAccessPolicy: {}
+
+Parameters:
+  MSKClusterName:
+    Type: String
+    Description: Enter the name of the MSK Cluster
+    Default: CLUSTER_NAME
+  MSKClusterId:
+    Type: String
+    Description: Enter the ID of the MSK Cluster
+    Default: CLUSTER_ID
+  MSKTopic:
+    Type: String
+    Description: Enter the name of the MSK Topic
+    Default: KAFKA_TOPIC
+  GlueSchemaRegistryName:
+    Type: String
+    Description: Enter the name of the Glue Schema Registry
+    Default: GLUE_SCHEMA_REGISTRY_NAME
+  ContactSchemaName:
+    Type: String
+    Description: Enter the name of the Contact Schema
+    Default: AVRO_SCHEMA
+  VpcId:
+    Type: String
+    Description: Enter the VPC ID where the MSK cluster is deployed
+    Default: VPC_ID
+  SubnetIds:
+    Type: CommaDelimitedList
+    Description: Enter the subnet IDs where the MSK cluster is deployed (comma-separated)
+    Default: SUBNET_IDS
+  SecurityGroupIds:
+    Type: CommaDelimitedList
+    Description: Enter the security group IDs that allow access to the MSK cluster (comma-separated)
+    Default: LAMBDA_SECURITY_GROUP_ID
+
+Outputs:
+  MSKConsumerLambdaFunction:
+    Description: "Topic Consumer Lambda Function ARN"
+    Value: !GetAtt LambdaMSKConsumerJavaFunction.Arn
+  MSKProducerLambdaFunction:
+    Description: "AVRO Producer Lambda Function ARN"
+    Value: !GetAtt LambdaMSKProducerJavaFunction.Arn
+  ConsumerDLQUrl:
+    Description: "URL of the Dead Letter Queue for the MSK Consumer"
+    Value: !Ref ConsumerDLQ
+  ConsumerDLQArn:
+    Description: "ARN of the Dead Letter Queue for the MSK Consumer"
+    Value: !GetAtt ConsumerDLQ.Arn
diff --git a/msk-lambda-schema-avro-java-sam/template_original.yaml b/msk-lambda-schema-avro-java-sam/template_original.yaml
new file mode 100644
index 000000000..e77964ce2
--- /dev/null
+++ b/msk-lambda-schema-avro-java-sam/template_original.yaml
@@ -0,0 +1,240 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+Description: >
+  kafka_event_consumer_and_producer_functions
+
+  Sample SAM Template for MSK consumer and AVRO producer with IAM auth
+
+# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
+Globals:
+  Function:
+    Timeout: 15
+
+Resources:
+  # SQS Queue to use as Dead Letter Queue for the MSK event source mapping
+  ConsumerDLQ:
+    Type: AWS::SQS::Queue
+    Properties:
+      MessageRetentionPeriod: 1209600 # 14 days (maximum retention period)
+      VisibilityTimeout: 300 # 5 minutes
+      Tags:
+        - Key: Purpose
+          Value: MSKConsumerDLQ
+
+  LambdaMSKConsumerJavaFunction:
+    Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction
+    Properties:
+      CodeUri: kafka_event_consumer_function
+      Handler: com.amazonaws.services.lambda.samples.events.msk.HandlerMSK::handleRequest
+      Runtime: java21
+      Architectures:
+        - x86_64
+      MemorySize: 512
+      VpcConfig:
+        SecurityGroupIds: !Ref SecurityGroupIds
+        SubnetIds: !Ref SubnetIds
+      Environment: # More info about Env Vars: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#environment-object
+        Variables:
+          PARAM1: VALUE
+          JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1 # More info about tiered compilation https://aws.amazon.com/blogs/compute/optimizing-aws-lambda-function-performance-for-java/
+      Events:
+        MSKEvent:
+          Type: MSK
+          Properties:
+            StartingPosition: LATEST
+            BatchSize: 1
+            MaximumBatchingWindowInSeconds: 1
+            Stream: !Join [ '', ["arn:", "aws:", "kafka:", !Ref "AWS::Region" , ":" ,!Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId] ]
+            Topics:
+              - !Ref MSKTopic
+            DestinationConfig:
+              OnFailure:
+                Destination: !GetAtt ConsumerDLQ.Arn
+            ProvisionedPollerConfig:
+              MaximumPollers: 3
+              MinimumPollers: 1
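+            # The SchemaRegistryConfig below attaches the Glue schema registry
+            # to the event source mapping so each record is validated against
+            # its registered Avro schema before the function is invoked;
+            # EventRecordFormat SOURCE hands the function the payload in its
+            # original encoding rather than a JSON-deserialized form.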
+            SchemaRegistryConfig:
+              SchemaRegistryURI: !Sub 'arn:aws:glue:${AWS::Region}:${AWS::AccountId}:registry/${GlueSchemaRegistryName}'
+              EventRecordFormat: SOURCE
+              SchemaValidationConfigs:
+                - Attribute: VALUE
+            FilterCriteria:
+              Filters:
+                - Pattern: '{"value": {"zip": [ { "prefix": "2000" } ]}}'
+      Policies:
+        - Statement:
+            - Sid: KafkaClusterPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka-cluster:Connect
+                - kafka-cluster:DescribeGroup
+                - kafka-cluster:DescribeCluster
+                - kafka-cluster:AlterCluster
+                - kafka-cluster:AlterClusterDynamicConfiguration
+                - kafka-cluster:WriteDataIdempotently
+                - kafka-cluster:AlterGroup
+                - kafka-cluster:DescribeTopic
+                - kafka-cluster:ReadData
+                - kafka-cluster:DescribeClusterDynamicConfiguration
+              Resource:
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId]]
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "topic/", !Ref MSKClusterName, "/" , !Ref MSKClusterId, "/*"]]
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "group/", !Ref MSKClusterName, "/" , !Ref MSKClusterId, "/*"]]
+
+            - Sid: KafkaPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka:DescribeClusterV2
+                - kafka:GetBootstrapBrokers
+              Resource: '*'
+
+            - Sid: EC2PermissionsPolicy
+              Effect: Allow
+              Action:
+                - ec2:DescribeSecurityGroups
+                - ec2:DescribeSubnets
+                - ec2:DescribeVpcs
+                - ec2:CreateNetworkInterface
+                - ec2:DescribeNetworkInterfaces
+                - ec2:DeleteNetworkInterface
+              Resource: '*'
+
+            - Sid: GlueSchemaRegistryPermissionsPolicy
+              Effect: Allow
+              Action:
+                - glue:GetSchemaByDefinition
+                - glue:GetSchemaVersion
+                - glue:GetRegistry
+                - glue:ListSchemas
+                - glue:ListSchemaVersions
+                - glue:RegisterSchemaVersion
+                - glue:PutSchemaVersionMetadata
+                - glue:GetSchemaVersionsDiff
+                - glue:QuerySchemaVersionMetadata
+              Resource: '*'
+
+            - Sid: SQSPermissionsPolicy
+              Effect: Allow
+              Action:
+                - sqs:SendMessage
+              Resource: !GetAtt ConsumerDLQ.Arn
+        - VPCAccessPolicy: {}
+
+  LambdaMSKProducerJavaFunction:
+    Type: AWS::Serverless::Function
+    Properties:
+      CodeUri: kafka_event_producer_function
+      Handler: com.amazonaws.services.lambda.samples.events.msk.AvroProducerHandler::handleRequest
+      Runtime: java21
+      Timeout: 300
+      Architectures:
+        - x86_64
+      MemorySize: 512
+      VpcConfig:
+        SecurityGroupIds: !Ref SecurityGroupIds
+        SubnetIds: !Ref SubnetIds
+      Environment:
+        Variables:
+          MSK_CLUSTER_ARN: !Join [ '', ["arn:", "aws:", "kafka:", !Ref "AWS::Region" , ":" ,!Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId] ]
+          MSK_TOPIC: !Ref MSKTopic
+          REGISTRY_NAME: !Ref GlueSchemaRegistryName
+          CONTACT_SCHEMA_NAME: !Ref ContactSchemaName
+          JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+      Policies:
+        - Statement:
+            - Sid: KafkaClusterPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka-cluster:Connect
+                - kafka-cluster:DescribeCluster
+                - kafka-cluster:WriteData
+                - kafka-cluster:DescribeTopic
+              Resource:
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "cluster/", !Ref MSKClusterName, "/" , !Ref MSKClusterId]]
+                - !Join ['', ["arn:", "aws:", "kafka:", !Ref "AWS::Region", ":", !Ref "AWS::AccountId", ":", "topic/", !Ref MSKClusterName, "/" , !Ref MSKClusterId, "/*"]]
+
+            - Sid: KafkaPermissionsPolicy
+              Effect: Allow
+              Action:
+                - kafka:DescribeClusterV2
+                - kafka:GetBootstrapBrokers
+              Resource: '*'
+
+            - Sid: EC2PermissionsPolicy
+              Effect: Allow
+              Action:
+                - ec2:DescribeSecurityGroups
+                - ec2:DescribeSubnets
+                - ec2:DescribeVpcs
+                - ec2:CreateNetworkInterface
+                - ec2:DescribeNetworkInterfaces
+                - ec2:DeleteNetworkInterface
+              Resource: '*'
+
+            - Sid: GlueSchemaRegistryPermissionsPolicy
+              Effect: Allow
+              Action:
+                - glue:GetSchemaByDefinition
+                - glue:GetSchemaVersion
+                - glue:GetRegistry
+                - glue:ListSchemas
+                - glue:ListSchemaVersions
+                - glue:GetSchemaVersionsDiff
+                - glue:QuerySchemaVersionMetadata
+                - glue:RegisterSchemaVersion
+                - glue:PutSchemaVersionMetadata
+                - glue:CreateSchema
+                - glue:CreateRegistry
+              Resource:
+                - !Sub "arn:aws:glue:${AWS::Region}:${AWS::AccountId}:registry/*"
+                - !Sub "arn:aws:glue:${AWS::Region}:${AWS::AccountId}:schema/*/*"
+        - VPCAccessPolicy: {}
+
+Parameters:
+  MSKClusterName:
+    Type: String
+    Description: Enter the name of the MSK Cluster
+    Default: CLUSTER_NAME
+  MSKClusterId:
+    Type: String
+    Description: Enter the ID of the MSK Cluster
+    Default: CLUSTER_ID
+  MSKTopic:
+    Type: String
+    Description: Enter the name of the MSK Topic
+    Default: KAFKA_TOPIC
+  GlueSchemaRegistryName:
+    Type: String
+    Description: Enter the name of the Glue Schema Registry
+    Default: GLUE_SCHEMA_REGISTRY_NAME
+  ContactSchemaName:
+    Type: String
+    Description: Enter the name of the Contact Schema
+    Default: AVRO_SCHEMA
+  VpcId:
+    Type: String
+    Description: Enter the VPC ID where the MSK cluster is deployed
+    Default: VPC_ID
+  SubnetIds:
+    Type: CommaDelimitedList
+    Description: Enter the subnet IDs where the MSK cluster is deployed (comma-separated)
+    Default: SUBNET_IDS
+  SecurityGroupIds:
+    Type: CommaDelimitedList
+    Description: Enter the security group IDs that allow access to the MSK cluster (comma-separated)
+    Default: LAMBDA_SECURITY_GROUP_ID
+
+Outputs:
+  MSKConsumerLambdaFunction:
+    Description: "Topic Consumer Lambda Function ARN"
+    Value: !GetAtt LambdaMSKConsumerJavaFunction.Arn
+  MSKProducerLambdaFunction:
+    Description: "AVRO Producer Lambda Function ARN"
+    Value: !GetAtt LambdaMSKProducerJavaFunction.Arn
+  ConsumerDLQUrl:
+    Description: "URL of the Dead Letter Queue for the MSK Consumer"
+    Value: !Ref ConsumerDLQ
+  ConsumerDLQArn:
+    Description: "ARN of the Dead Letter Queue for the MSK Consumer"
+    Value: !GetAtt ConsumerDLQ.Arn