AWS Tips
Entry in SSH config for bastion. Bastion can also be used to forward ports of internal services, see commented examples.
Host host.dev
  Hostname 55.111.222.33
  User admin
  IdentityFile ~/.ssh/id
  # LocalForward 6379 mycachegrp.asd123.ng.0001.euw1.cache.amazonaws.com:6379
  # LocalForward 9200 my-internal-ESearch-11112222.eu-west-1.elb.amazonaws.com:9200
  # LocalForward 3309 my-read-db.asd123qwe345.eu-west-1.rds.amazonaws.com:3306
A second entry is a wildcard config for private IPs
Host 10.0.*
  ProxyCommand ssh -W %h:%p host.dev
  User admin
  IdentityFile ~/.ssh/id
It’s possible to use a prefix/postfix string in Host to have separate config blocks for different servers (other user names, private ssh keys etc.). The string should be removed in ProxyCommand like this:
Host ec2-user.*
  ProxyCommand ssh -W $(echo %h | sed s/^ec2-user\\.//):%p my.app.dev
  User ec2-user
  IdentityFile ~/.ssh/id

Host appuser.*
  ProxyCommand ssh -W $(echo %h | sed s/^appuser\\.//):%p my.app.dev
  User appuser
  IdentityFile ~/.ssh/id
Use aws-cli to get the private IP addresses
# Look up the private IP address(es) of instances tagged Name=my-app-web.
# The --query expression is quoted: an unquoted 'Reservations[]...' contains
# shell glob characters and can be expanded against files in the current dir.
aws ec2 describe-instances \
  --filter "Name=tag:Name,Values=my-app-web" \
  --query "Reservations[].Instances[].PrivateIpAddress" \
  --output text
# SSH into the instance found above and run a quick health check.
ssh $(aws ec2 describe-instances --filter "Name=tag:Name,Values=my-app-web" --query "Reservations[].Instances[].PrivateIpAddress" --output text) "hostname; uptime"
This command may be extended and converted into awscli-aliases function
# awscli alias (~/.aws/cli/alias): print private IPs of running instances
# matching Project ($1), Role ($2) and Environment ($3) tags.
instances-by-tags =
  !f() {
    # All three tag values are required; the original only tested an unquoted
    # $1, which both breaks under `set -u`-style strictness and lets missing
    # $2/$3 silently match nothing.
    if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
      echo "The strings to match Environment, Project and Role tags are required."
      exit 1
    fi
    aws ec2 describe-instances \
      --filters "Name=instance-state-name,Values=running" \
      "Name=tag:Project,Values=${1}" \
      "Name=tag:Role,Values=${2}" \
      "Name=tag:Environment,Values=${3}" \
      --query "Reservations[].Instances[].PrivateIpAddress" \
      --output text
  }; f
# Example invocation:
aws instances-by-tags myapp web development
This function can be used in another one, for example to run ssh sessions in tmux
# awscli alias: open one tmux window per matching instance and type a command
# into each ssh session.  $1..$3 are the tag values passed through to the
# instances-by-tags alias; $4 is the command to type.
ssh-appuser-tmux =
!f() {
private_ips=$(aws instances-by-tags ${1} ${2} ${3})
if [ -z "${private_ips}" ]; then
echo "The search strings don't match any instance."
exit 1
fi
# Window name is "<first 2 chars of project>.<role>".  send-keys -l types the
# command literally; the second send-keys presses Enter to execute it.
for host in ${private_ips}; do
tmux new-window -n "${1:0:2}.${2}" "ssh ${host}" \; \
send-keys -l "${4}"\; send-keys "Enter"
done
}; f
bind-key S command-prompt -p "Project:","Role:","Environment:","Command:" -I "","","production","" "run-shell 'aws ssh-appuser-tmux %1 %2 %3 %4'"

Org-mode header arguments for the KMS examples below:
:header-args+: :dir ~/Work/App
:header-args+: :var KEY_ID="alias/app-companion"
:header-args+: :var ENCRYPTED_SECRET_AS_BLOB="encrypted_secret_blob"
:header-args+: :var DECRYPTED_SECRET_AS_BLOB="decrypted_secret_blob"
# Collect IDs of running instances where any tag value contains 'Web'.
IIDS=$(aws ec2 describe-instances \
--filter Name=instance-state-name,Values=running \
--query "Reservations[].Instances[]|[?Tags[?contains(Value,'Web')]].InstanceId" \
--output text)
# Tag them all with Role=web.  $IIDS is intentionally unquoted so each ID
# becomes a separate --resources argument.
aws ec2 create-tags \
--resources $IIDS \
--tags Key=Role,Value=web
us-east-1 | eu-west-1 |
subnet-510dca26 | subnet-d57ceab0 |
sg-ade390cb | sg-e5f58280 |
sg-14fe8d72 | sg-ece9ee88 |
# Preview the S3 URL the template will have after upload.
echo "https://$BKT.s3.amazonaws.com$PFX/$TML"
# Upload the CloudFormation template, then validate it from its S3 location.
aws --profile $PFL s3 cp $PTH/cloudformation/$TML s3://$BKT$PFX/
aws --profile $PFL cloudformation validate-template \
--template-url "https://$BKT.s3.amazonaws.com$PFX/$TML"
# Pack the project tree (minus VCS metadata) and push the release tarball to S3.
cd $PTH
tar cjf ../proj-aws-${REL}.tar.bz2 --exclude-vcs .
aws --profile $PFL s3 cp ../proj-aws-${REL}.tar.bz2 s3://$BKT/
# Create the stack.  Uses '--region $RGN' (space form) consistently; the
# original mixed '--region=$RGN' here with '--region $RGN' everywhere else.
aws --profile $PFL --region $RGN cloudformation create-stack \
--stack-name $STN \
--template-url "https://$BKT.s3.amazonaws.com$PFX/$TML" \
--parameters ParameterKey=MaxPrice,ParameterValue=$PRC \
ParameterKey=Subnet,ParameterValue=$SBN \
ParameterKey=SecurityGroupIds,ParameterValue=$SGI \
ParameterKey=ELBSGIds,ParameterValue=$LBSG \
ParameterKey=CommitID,ParameterValue=$REL
# Update an existing stack with a new template/commit.
aws --profile $PFL --region $RGN cloudformation update-stack \
--stack-name $STN \
--template-url "https://$BKT.s3.amazonaws.com$PFX/$TML" \
--parameters ParameterKey=Subnet,ParameterValue=$SBN \
ParameterKey=SecurityGroupIds,ParameterValue=$SGI \
ParameterKey=ELBSGIds,ParameterValue=$LBSG \
ParameterKey=CommitID,ParameterValue=$REL
# Tear the stack down.
aws --profile $PFL --region $RGN cloudformation delete-stack \
--stack-name $STN
(calc-eval "0.097 * 24 * 30")
# Query the AWS Price List API for a given instance type / location / OS.
APIURL=pricing.us-east-1.amazonaws.com
SRVS=AmazonEC2
# NB: must NOT be named PATH — the original clobbered the shell's command
# search path here, so every later command (curl, jq, ...) stopped resolving.
OFFER_PATH=$(curl -s https://$APIURL/offers/v1.0/aws/index.json |jq -r ".offers|.${SRVS}|.currentVersionUrl")
TP=m3.medium
LTN="US East (N. Virginia)"
OS=Linux
# The original hard-coded empty strings for location/operatingSystem (matching
# nothing) and never used $LTN/$OS; interpolate them here.
curl -s https://$APIURL$OFFER_PATH |jq \
-r ".products[]|.attributes|select(.instanceType == \"$TP\" and .location == \"$LTN\" and .operatingSystem == \"$OS\")"
Using AWS CodeDeploy to Deploy an Application to an Auto Scaling Group AWS CodeDeploy::AppSpec ‘hooks’ Section github::stelligent::stelligent_commons::codepipeline-codecommit.json github::stelligent::stelligent_commons::codedeploy-master.json github::stelligent::stelligent_commons::codedeploy-deployment.json
# Find the newest Amazon Linux 2 HVM x86_64 (gp2-backed) AMI in a region.
REG=us-east-1
aws ec2 describe-images --region $REG \
  --owners amazon \
  --filters \
    Name=name,Values='amzn2-*-gp2' \
    Name=architecture,Values=x86_64 \
    Name=virtualization-type,Values=hvm \
  --query 'sort_by(Images, &CreationDate)[-1].[ImageId,Name]' \
  --output text
Just for future reference, if you have to perform a stress-test on one of the AWS resources I would recommend that you notify us with the following form: https://portal.aws.amazon.com/gp/aws/html-forms-controller/contactus/AWSSecurityPenTestRequest
# Bulk-create hosted zones for every domain read from stdin; API responses are
# appended to a JSON log.  $(date +%s) supplies the unique caller reference.
for i in `cat`; do aws --profile ras route53 create-hosted-zone --name $i --caller-reference $(date +%s) >> domains.20150305.json; done
# Find a zone by name, then list its records.
aws route53 list-hosted-zones --output text |grep blabla.de
aws route53 list-resource-record-sets --hosted-zone-id Z2C9O4QJDOHI --output json
# Delete a plain CNAME record (the DELETE batch must repeat the record exactly).
aws route53 change-resource-record-sets --hosted-zone-id Z2C9O4QJDOHI --change-batch '{ "Comment": "delete record","Changes": [{ "Action": "DELETE","ResourceRecordSet": { "Name": "www.blabla.de.","Type": "CNAME","TTL": 300,"ResourceRecords": [{ "Value": "blabla.de" }] } }] }'
# Delete an alias A record pointing at an ELB, then remove the emptied zone.
aws route53 change-resource-record-sets --hosted-zone-id Z2C9O4QJDOHI --change-batch '{ "Comment": "delete record","Changes": [{ "Action": "DELETE","ResourceRecordSet":{ "AliasTarget": { "HostedZoneId": "Z3NF1Z3NOM5OY2","EvaluateTargetHealth": false,"DNSName": "awseb-e-q-awsebloa-atqcbij53zfz-222111333.eu-west-1.elb.amazonaws.com." },"Type": "A","Name": "blabla.de." }}] }'
aws route53 delete-hosted-zone --id Z2C9O4QJDOHI
# For each domain: build DELETE change batches for its www CNAME and apex A
# records (jq pulls the live record so the DELETE body matches it exactly),
# then apply both batches.
for DMN in blabla.de blabla.com blabla.com.br blabla.it
do
DMNID=$(aws route53 list-hosted-zones --output text |grep $DMN |cut -f3 |cut -d'/' -f3)
echo "{ \"Comment\": \"delete record\",\"Changes\": [{ \"Action\": \"DELETE\",\"ResourceRecordSet\": $(aws route53 list-resource-record-sets --hosted-zone-id $DMNID --start-record-name www.$DMN --start-record-type CNAME --max-items 1 --output json |jq -c ".ResourceRecordSets|.[0]")}] }" > REC1
echo "{ \"Comment\": \"delete record\",\"Changes\": [{ \"Action\": \"DELETE\",\"ResourceRecordSet\": $(aws route53 list-resource-record-sets --hosted-zone-id $DMNID --start-record-name $DMN --start-record-type A --max-items 1 --output json |jq -c ".ResourceRecordSets|.[0]")}] }" > REC2
aws route53 change-resource-record-sets --hosted-zone-id $DMNID --change-batch file://REC1
aws route53 change-resource-record-sets --hosted-zone-id $DMNID --change-batch file://REC2
done
# Check files
cat REC1 REC2
# Confirm the records are gone in the last zone processed.
aws route53 list-resource-record-sets --hosted-zone-id $DMNID
Delete empty hosted zones
# Delete hosted zones for the listed domains under the given profile.
PRF=proj
DMNS="blabla.co.uk blabla.com"
for DMN in $DMNS; do
DMNID=$(aws --profile $PRF \
route53 list-hosted-zones --output text \
--query "HostedZones[?starts_with(Name,'${DMN}')].Id" |cut -f3 -d/)
# Skip domains without a matching zone instead of calling the API with an
# empty --id (the original would fail with a confusing error there).
[ -n "$DMNID" ] || continue
aws --profile $PRF route53 delete-hosted-zone --id $DMNID
done
Removing S3 hosted domains
PRF=proj
DMN=blabla.com
# All buckets whose name contains the domain.  NOTE(review): the dots in the
# jq regex are unescaped, so '.' matches any character — acceptable here.
BKTS=$(aws --profile $PRF \
s3api list-buckets |jq \
-r ".Buckets[]|select(.Name|test(\".*.${DMN}.*\"))|.Name")
# Merge each matching bucket into s3://$DMN/<first-label-of-bucket-name>/,
# then delete the source bucket (--force removes its objects too).
aws --profile $PRF s3 mb s3://$DMN
for BKT in $BKTS; do
aws --profile $PRF s3 sync s3://$BKT/ s3://$DMN/${BKT%%.*}/
aws --profile $PRF s3 rb s3://$BKT --force
done
DMN=   # domain to operate on, e.g. blabla.com
DMNID=$(aws route53 list-hosted-zones --output text |grep $DMN |cut -f3 |cut -d'/' -f3)
# Build a DELETE batch for the www CNAME.  All steps now use /tmp/REC1: the
# original wrote ~/tmp/REC1 but later read 'REC1' and '/tmp/REC1' — three
# different files.
echo "{ \"Comment\": \"delete record\",\"Changes\": [{ \"Action\": \"DELETE\",\"ResourceRecordSet\": $(aws route53 list-resource-record-sets --hosted-zone-id $DMNID --start-record-name www.$DMN --start-record-type CNAME --max-items 1 --output json |jq -c ".ResourceRecordSets|.[0]")}] }" > /tmp/REC1
# Check files
cat /tmp/REC1
# REC2 re-creates the same record as a wildcard (\052 is an escaped '*').
sed 's/DELETE/CREATE/;s/www/\\\\052/' /tmp/REC1 > /tmp/REC2
cat /tmp/REC2
# Create the wildcard record first, then delete the old www record.
aws route53 change-resource-record-sets --hosted-zone-id $DMNID --change-batch file:///tmp/REC2
aws route53 change-resource-record-sets --hosted-zone-id $DMNID --change-batch file:///tmp/REC1
aws route53 list-resource-record-sets --hosted-zone-id $DMNID --output json
# Swap the ELB behind each domain's apex alias record: rewrite references to
# ELB2 with ELB1 and UPSERT the result.
ELB1=awseb-e-d-AWSEBLoa-15O5LKLCGDQ
ELB2=awseb-e-n-AWSEBLoa-VUAAPUK2UE
# Canonical hosted-zone IDs and DNS names of both load balancers.
ELBID1=$(aws elb describe-load-balancers --load-balancer-name $ELB1 --region us-east-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneNameID" |tr -d '"')
ELBID2=$(aws elb describe-load-balancers --load-balancer-name $ELB2 --region us-east-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneNameID" |tr -d '"')
ELBNAME1=$(aws elb describe-load-balancers --load-balancer-name $ELB1 --region us-east-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneName" |tr -d '"')
ELBNAME2=$(aws elb describe-load-balancers --load-balancer-name $ELB2 --region us-east-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneName" |tr -d '"')
for DMN in blabla.com blabla.au.com blabla.com.mx blabla.co.nz
do
DMNID=$(aws route53 list-hosted-zones --output text |grep $DMN |cut -f3 |cut -d'/' -f3)
# Fetch the current apex A record wrapped in an UPSERT action.
echo "{ \"Action\": \"UPSERT\",\"ResourceRecordSet\": $(aws route53 list-resource-record-sets --hosted-zone-id $DMNID --start-record-name $DMN --start-record-type A --max-items 1 --output json |jq -c ".ResourceRecordSets|.[0]")}" > /tmp/REC1
# Assemble the change batch, replacing ELB2's zone id/name with ELB1's
# (case-insensitive) so the alias now points at ELB1.
echo "{ \"Comment\": \"Change simple record to a weighted one\",\"Changes\": [" > /tmp/REC2
echo "$(cat /tmp/REC1)" |sed "s/$ELBID2/$ELBID1/i;s/$ELBNAME2/$ELBNAME1/i" >> /tmp/REC2
echo "]}" >> /tmp/REC2
aws route53 change-resource-record-sets --hosted-zone-id $DMNID --change-batch file:///tmp/REC2
done
# Convert each domain's simple apex alias record into a pair of weighted
# records (weight 1 -> ELB1, weight 0 -> ELB2) in one change batch.
EUELB1=awseb-e-q-AWSEBLoa-ATQCBI53FZ
EUELB2=awseb-e-m-AWSEBLoa-KNNDTCNDVL
ELB1=awseb-e-d-AWSEBLoa-15O5LTKCQDQ
ELB2=awseb-e-n-AWSEBLoa-VUAAXPKSUE
# Canonical hosted-zone IDs and DNS names of the two EU load balancers.
ELBID1=$(aws elb describe-load-balancers --load-balancer-name $EUELB1 --region eu-west-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneNameID" |tr -d '"')
ELBID2=$(aws elb describe-load-balancers --load-balancer-name $EUELB2 --region eu-west-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneNameID" |tr -d '"')
ELBNAME1=$(aws elb describe-load-balancers --load-balancer-name $EUELB1 --region eu-west-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneName" |tr -d '"')
ELBNAME2=$(aws elb describe-load-balancers --load-balancer-name $EUELB2 --region eu-west-1 --output json |jq -c ".LoadBalancerDescriptions|.[0]|.CanonicalHostedZoneName" |tr -d '"')
# Fill in the domain list before running.
for DMN in
do
DMNID=$(aws route53 list-hosted-zones --output text |grep $DMN |cut -f3 |cut -d'/' -f3)
echo "{ \"Action\": \"DELETE\",\"ResourceRecordSet\": $(aws route53 list-resource-record-sets --hosted-zone-id $DMNID --start-record-name $DMN --start-record-type A --max-items 1 --output json |jq -c ".ResourceRecordSets|.[0]")}" > /tmp/REC1
echo "{ \"Comment\": \"Change simple record to a weighted one\",\"Changes\": [" > /tmp/REC2
echo "$(cat /tmp/REC1)," >> /tmp/REC2
# Read back /tmp/REC1 — the original mistakenly read ~/tmp/REC1 here, a
# different file from the one written above.
cat /tmp/REC1 |sed "s/DELETE/CREATE/;s/\"Name\"/\"Weight\":1,\"Name\"/;s/\}\}$/,\"SetIdentifier\":\"EU-PP1\"}},/" >> /tmp/REC2
cat /tmp/REC1 |sed "s/DELETE/CREATE/;s/$ELBID1/$ELBID2/i;s/$ELBNAME1/$ELBNAME2/i;s/\"Name\"/\"Weight\":0,\"Name\"/;s/\}\}$/,\"SetIdentifier\":\"EU-PP2\"}}]}/" >> /tmp/REC2
aws route53 change-resource-record-sets --hosted-zone-id $DMNID --change-batch file:///tmp/REC2
done
# Candidate ELB DNS names (uncomment/keep only the one to search for; the last
# assignment wins).
ELB=awseb-e-a-AWSEBLoa-1B1HFFI2I0XIP-1401699616.eu-west-1.elb.amazonaws.com # eu-proj-10-30
ELB=awseb-e-t-AWSEBLoa-70JH9OXMYPDE-537822105.us-east-1.elb.amazonaws.com # us-proj-10-30
ELB=awseb-e-n-AWSEBLoa-C836AOR5BDMT-470022676.eu-west-1.elb.amazonaws.com # eu-proj-2015-02-17-t2
ELB=awseb-e-2-AWSEBLoa-145VIWACSET2U-768161803.eu-west-1.elb.amazonaws.com # eu-proj-2015-02-17-t3
ELB=awseb-e-x-AWSEBLoa-1P6XKILC4ECV2-1574593594.eu-west-1.elb.amazonaws.com # eu-proj-2015-02-17-t4
ELB=awseb-e-g-AWSEBLoa-RKLPYM69WA68-1381825068.eu-west-1.elb.amazonaws.com # eu-proj-2015-02-17-t5
# Scan every hosted zone for records referencing $ELB, skipping records that
# are already weighted to 0.
for ZID in $(aws route53 list-hosted-zones |jq '.HostedZones[]|.Id' |sed 's/.*\/\(.*\)"$/\1/')
do
aws route53 list-resource-record-sets --hosted-zone-id $ZID --output json |jq -c '.ResourceRecordSets[]' |grep -v 'Weight":0' |grep -i $ELB
done
# Print "Name PublicIp" for every instance whose Name tag contains $1.
STR=${1}
# Double-quoted filter so ${STR} actually expands — the original's single
# quotes sent the literal string '*${STR}*' to the API, matching nothing.
INS=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=*${STR}*" --output json --region eu-west-1 |jq -c '.Reservations[].Instances[] | {(.Tags[] |select(.Key=="Name") |.Value): .PublicIpAddress}')
# Strip the JSON punctuation from each {"Name":"ip"} object.
for IN in ${INS[@]}; do
echo $IN |sed 's/[\{\"\}]//g;s/:/ /'
done
Adding the `-r` (raw output) option to jq helps strip the surrounding quotes from the output above.
# Inspect Elastic Beanstalk configuration: the available options schema and the
# live settings of one environment.
aws elasticbeanstalk describe-configuration-options --application-name merged-us --output json
aws elasticbeanstalk describe-configuration-settings --application-name merged-us --environment-name us-2013-10-09 --output json
# Mark an instance offline in monitoring via the project's fabric task.
fab -f ~/src/proj/fabfile.py \
-H ec2-55-222-222-123.eu-west-1.compute.amazonaws.com \
-u ubuntu mark_instance_offline:$RGN,$EB
# Resolve the Auto Scaling group whose Name tag equals the EB environment name.
ASG=$(aws --profile $PFL autoscaling describe-auto-scaling-groups \
--region $RGN \
--output json |jq \
-r ".AutoScalingGroups[]|.Tags[]|select(.Key == \"Name\")|select(.Value == \"$EB\")|.ResourceId")
# Scale it down.  Uses the same $PFL profile as the lookup above — the
# original hard-coded '--profile proj' here, silently targeting a different
# account when $PFL was something else.
aws --profile $PFL autoscaling update-auto-scaling-group \
--min-size 0 \
--region $RGN \
--auto-scaling-group-name $ASG
from fabric.api import *
from fabric.contrib.files import sed

# Monitoring (OMD/check_mk) server and login user.
env.hosts=["55.222.222.123"]
env.user="ubuntu"

def instance_offline(iid):
    """Mark instance `iid` offline in check_mk and recompile the site config."""
    # Locate the conf.d file that mentions the instance id.
    file_name = run("grep -rl %s /omd/sites/omdeu/etc/check_mk/conf.d/" % iid)
    # Append '|offline' to the host entry in place (backup='' keeps no backup).
    sed(file_name, before=iid, after="%s|offline" % iid, use_sudo=True, backup='')
    # Recompile check_mk as the OMD site user so the change takes effect.
    sudo("cmk -C", user="omdeu")
http://devblog.moz.com/2011/08/launching-and-deploying-instances-with-boto-and-fabric/
# Bundle the wildcard certificate, key and CA chain into PKCS#12, import it
# into a JKS keystore (for Wowza), and convert back to PEM when needed.
# NOTE(review): the passwords here are example values left in plain text —
# rotate any real ones and avoid passing secrets on the command line.
openssl pkcs12 -export -in ssl.crt/wildcard.blabla.tv.crt -inkey ssl.key/wildcard.blabla.tv.key -CAfile ssl.crt/rapidssl-bundle.crt -out wildcard.blabla.tv.pem -passin pass:E5bwty3h -passout pass:E5bwty3h
keytool -importkeystore -destkeystore wildcard.blabla.tv.jks -srckeystore wildcard.blabla.tv.pem -srcstoretype PKCS12 -srcstorepass E5bwty3h -deststorepass E5bwty3h
# Verify the keystore contents.
keytool -list -keystore wildcard.blabla.tv.jks -storepass E5bwty3h
# Reverse direction: JKS -> PKCS#12 -> PEM.
keytool -importkeystore -srckeystore live.blabla.tv.jks -destkeystore live.blabla.tv.p12 -srcstoretype JKS -deststoretype PKCS12 -srcstorepass E5bwty3h -deststorepass E5bwty3h -srcalias wowza -destalias wowza -srckeypass E5bwty3h -destkeypass E5bwty3h -noprompt
openssl pkcs12 -in live.blabla.tv.p12 -out live.blabla.tv.pem -passin pass:E5bwty3h -passout pass:E5bwty3h
To use the key, the passphrase must be removed from it:
# Strip the passphrase from the private key.
openssl rsa -in live.blabla.tv.pem -out live.blabla.tv.key
List currently installed certificates
aws iam list-server-certificates
Upload a new certificate
# upload certificate (the /cloudfront/ path makes it usable by CloudFront)
aws iam upload-server-certificate \
--server-certificate-name $SCN \
--certificate-body file://$SCN-chain.pem \
--private-key file://$SCN-private.pem \
--path /cloudfront/
Delete certificate
# delete certificate
aws iam delete-server-certificate \
--server-certificate-name $DMN
# check beanstalk settings
aws elasticbeanstalk describe-environments
aws elasticbeanstalk describe-configuration-settings --application-name 'Blabla Wowza Beanstalk' --environment-name blablaWowzaBeanst-env --output json
# update EB https settings
aws elasticbeanstalk update-environment --environment-name blablaWowzaBeanst-env --option-settings Namespace="aws:elb:loadbalancer",OptionName="SSLCertificateId",Value="arn:aws:iam::072941981223:server-certificate/live.blabla.tv"
aws elasticbeanstalk update-environment --environment-name blablaWowzaBeanst-env --option-settings Namespace="aws:elb:loadbalancer",OptionName="LoadBalancerHTTPPort",Value="OFF"
# Recreate the ELB's 443 listener directly with the new certificate
# (forwarding HTTPS to the Wowza instance port 1935).
aws elb describe-load-balancers --output json
aws elb delete-load-balancer-listeners --load-balancer-name awseb-e-8-AWSEBLoa-2C7CQ3M4M0 --load-balancer-ports 443
aws elb create-load-balancer-listeners --load-balancer-name awseb-e-8-AWSEBLoa-2C7CQ3M4M0 --listeners Protocol=HTTPS,LoadBalancerPort=443,InstanceProtocol=HTTP,InstancePort=1935,SSLCertificateId="arn:aws:iam::072941981298:server-certificate/wildcard.blabla.tv"
# Decode and inspect an uploaded IAM server certificate.  jq -r emits the raw
# string with JSON \n escapes already converted to real newlines, so the
# original's `tr -d '"' | sed 's/\\n/\n/g'` post-processing is unnecessary.
aws iam get-server-certificate --server-certificate-name blabla_2016 |jq -r '.ServerCertificate.CertificateBody' |openssl x509 -text -noout
http://alestic.com/2012/09/aws-command-line-tools
aws iam get-user --user-name user
# Print "user access-key-id" for every IAM user's access keys.
aws iam list-users --query 'Users[].UserName' --output text \
  | tr '\t' '\n' \
  | while read -r iam_user; do
      aws iam list-access-keys \
        --user-name "$iam_user" \
        --query 'AccessKeyMetadata[].[UserName,AccessKeyId]' \
        --output text
    done
# Dump yesterday's ELB CloudWatch metrics (Latency average + request/HTTP-code
# sums) to one text file per ELB per metric.
# declare -A USELB
# declare -A EUELB
# USELB=([usc]='awseb-e-s-AWSEBLoa-1B5JFK6CCROJ0' [uspp]='awseb-e-d-AWSEBLoa-15O5LTKLCQGDQ')
# EUELB=([euc]='awseb-e-j-AWSEBLoa-1L9GEVVE1OCXJ' [eupp]='awseb-e-q-AWSEBLoa-ATQCBIJ53ZFZ')
USPP='awseb-e-d-AWSEBLoa-15O5LTKLCQGDQ'
USAPI='awseb-e-u-AWSEBLoa-1P2PC9L8ZWSJR'
EUPP='awseb-e-q-AWSEBLoa-ATQCBIJ53ZFZ'
EUAPI='awseb-e-q-AWSEBLoa-QGQ1AF4NV70L'
# CUR=$(date +%FT%T)
# YSD=$(date +%FT%T -d "24 hours ago")
CUR='2013-12-09T00:00'
YSD='2013-12-08T00:00'
PER=900
# region:file-label:elb-name triples; one loop replaces the original's four
# copy-pasted command groups and writes identically named output files.
for SPEC in \
  "us-east-1:US-PP:${USPP}" \
  "us-east-1:US-API:${USAPI}" \
  "eu-west-1:EU-PP:${EUPP}" \
  "eu-west-1:EU-API:${EUAPI}"; do
  RGN=${SPEC%%:*}; REST=${SPEC#*:}; LBL=${REST%%:*}; LB=${REST#*:}
  aws cloudwatch get-metric-statistics --region ${RGN} \
    --start-time ${YSD} --end-time ${CUR} --period ${PER} \
    --statistics "Average" --namespace AWS/ELB --metric-name Latency \
    --dimensions "Name=LoadBalancerName,Value=${LB}" --output text > ${LBL}-Latency-20131208.txt
  for i in RequestCount HTTPCode_Backend_5XX HTTPCode_Backend_2XX HTTPCode_ELB_5XX; do
    aws cloudwatch get-metric-statistics --region ${RGN} \
      --start-time ${YSD} --end-time ${CUR} --period ${PER} \
      --statistics "Sum" --namespace AWS/ELB --metric-name $i \
      --dimensions "Name=LoadBalancerName,Value=${LB}" --output text > ${LBL}-${i}-20131208.txt
  done
done
# Install Ansible with the dynamic EC2 inventory script.
# NOTE(review): this PPA and the raw.github.com plugin URLs are long dead;
# current Ansible ships the aws_ec2 inventory plugin instead — verify before use.
sudo add-apt-repository --yes ppa:rquillo/ansible
sudo apt-get --assume-yes install ansible
AIDIR=/etc/ansible/inventory
sudo mkdir ${AIDIR}
sudo wget -O ${AIDIR}/ec2.py https://raw.github.com/ansible/ansible/devel/plugins/inventory/ec2.py
sudo wget -O ${AIDIR}/ec2.ini https://raw.github.com/ansible/ansible/devel/plugins/inventory/ec2.ini
export ANSIBLE_HOSTS=/etc/ansible/inventory # or change hostfile in /etc/ansible/ansible.cfg or ~/.ansible.cfg
export ANSIBLE_HOST_KEY_CHECKING=False # or change host_key_checking in /etc/ansible/ansible.cfg or ~/.ansible.cfg
export ANSIBLE_NOCOWS=1 # or export ANSIBLE_COW_SELECTION=random
# Legacy EC2 credential variables used by the old ec2.py script:
#export EC2_CERT=~/.ssh/cert-XXX.pem
#export EC2_PRIVATE_KEY=~/.ssh/pk-XXX.pem
# listing PP 2013-08-08 instances in US ('--list-hosts' is the ad-hoc flag;
# plain '--list' belongs to ansible-inventory)
ansible 'us-east-1:&*-pp-*08-08' --list-hosts
# check alive instances
ansible -u ubuntu --private-key=/home/user/.ssh/au_key.pem ap-southeast-2 -m ping
# clear template dirs on all EU 2013-08-08 PP instances
ansible -u ec2-user --private-key=/home/user/.ssh/eu_key.pem 'eu-west-1:&*-pp-*08-08' -m file -a 'dest=/var/www/html/public/cache/templates/ state=absent'
# restart memcached
ansible -u ec2-user --private-key=/home/user/.ssh/eu_key.pem 'eu-west-1:&*-pp-*08-08' -m service -a "name=memcached state=restarted"
# flush memcached cache — needs '-m shell': the default command module does
# not go through a shell, so the pipe to nc would be passed as literal args
ansible -u ec2-user --private-key=/home/user/.ssh/eu_key.pem 'eu-west-1:&*-pp-*08-08' -m shell -a "echo 'flush_all' | nc localhost 11211"
# get new localization (multiple ';'-joined commands also require a shell)
ansible -u ec2-user --private-key=/home/user/.ssh/eu_key.pem 'eu-west-1:&*-pp-*08-08' -m shell \
-a 'cd /var/app/pp/data/localization; wget https://s3.amazonaws.com/pp-locale/data/localizations.tar.gz; tar xzf localizations.tar.gz'
https://github.com/debops/ansible-nginx http://blog.josephkahn.io/articles/ansible-modules/ https://groups.google.com/forum/#!topic/ansible-project/GfJBkzuTTNM
Parallel Stack Processing and Nested Stack Updates for AWS CloudFormation Understanding Nested CloudFormation Stacks Developers Tools Балансировщик нагрузки для Amazon EC2 c автомасштабированием AWSDocs: Ruby AWS::CloudWatch Class github::aws::aws-sdk-ruby Comparison Analysis:Amazon ELB vs HAProxy EC2 (Jul 2013) 10 Things You Should Know About AWS
# Automated AMI Backups
#
# @author Robert Kozora <robert.kozora@wheaton.com>
#
# This script will search for all instances having a tag with "Backup" or "backup"
# on it. As soon as we have the instances list, we loop through each instance
# and create an AMI of it. Also, it will look for a "Retention" tag key which
# will be used as a retention policy number in days. If there is no tag with
# that name, it will use a 7 days default value for each AMI.
#
# After creating the AMI it creates a "DeleteOn" tag on the AMI indicating when
# it will be deleted using the Retention value and another Lambda function
import boto3
import collections
import datetime

ec = boto3.client('ec2')


def lambda_handler(event, context):
    """Create an AMI for every Backup/backup-tagged instance and record a
    retention bucket, then tag each new AMI with its DeleteOn date.

    The original was Python 2 ('print' statements), which is a SyntaxError on
    every currently supported Lambda runtime; converted to print() calls.
    """
    reservations = ec.describe_instances(
        Filters=[
            {'Name': 'tag-key', 'Values': ['backup', 'Backup']},
        ]
    ).get('Reservations', [])

    # Flatten the reservation -> instances nesting into a single list.
    instances = [i for r in reservations for i in r['Instances']]

    print("Found %d instances that need backing up" % len(instances))

    # retention_days -> list of AMI ids created with that retention.
    to_tag = collections.defaultdict(list)

    for instance in instances:
        # Per-instance retention in days; defaults to 7 when no Retention tag.
        # .get('Tags', []) guards against instances with no tags at all, which
        # raised KeyError in the original.
        try:
            retention_days = [
                int(t.get('Value')) for t in instance.get('Tags', [])
                if t['Key'] == 'Retention'][0]
        except IndexError:
            retention_days = 7

        create_time = datetime.datetime.now()
        create_fmt = create_time.strftime('%Y-%m-%d.%H.%M.%S')

        # NoReboot=True: image the instance without stopping it.
        ami = ec.create_image(
            InstanceId=instance['InstanceId'],
            Name="Lambda - " + instance['InstanceId'] + " From " + create_fmt,
            Description="Lambda created AMI of instance " + instance['InstanceId'],
            NoReboot=True,
            DryRun=False,
        )
        to_tag[retention_days].append(ami['ImageId'])
        print("Retaining AMI %s of instance %s for %d days" % (
            ami['ImageId'],
            instance['InstanceId'],
            retention_days,
        ))

    # Stamp every AMI in a retention bucket with its deletion date; the
    # companion cleanup Lambda keys off this DeleteOn tag.
    for retention_days in to_tag.keys():
        delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
        delete_fmt = delete_date.strftime('%m-%d-%Y')
        print("Will delete %d AMIs on %s" % (len(to_tag[retention_days]), delete_fmt))
        ec.create_tags(
            Resources=to_tag[retention_days],
            Tags=[
                {'Key': 'DeleteOn', 'Value': delete_fmt},
            ]
        )
# Automated AMI and Snapshot Deletion
#
# @author Robert Kozora <robert.kozora@wheaton.com>
#
# This script will search for all instances having a tag with "Backup" or "backup"
# on it. As soon as we have the instances list, we loop through each instance
# and reference the AMIs of that instance. We check that the latest daily backup
# succeeded then we store every image that's reached its DeleteOn tag's date for
# deletion. We then loop through the AMIs, deregister them and remove all the
# snapshots associated with that AMI.
import boto3
import collections
import datetime
import time
import sys

ec = boto3.client('ec2', 'us-east-1')
ec2 = boto3.resource('ec2', 'us-east-1')
images = ec2.images.filter(Owners=["xxxxxx"])


def lambda_handler(event, context):
    """Deregister expired 'Lambda - <instance-id>' AMIs and delete their
    snapshots, but only after confirming a backup from today exists.

    Converted from Python 2 (print statements) and fixed: the original could
    compare False <= time.struct_time when an AMI had no DeleteOn tag — a
    TypeError on Python 3, and on Python 2 it silently marked untagged AMIs
    for deletion.
    """
    reservations = ec.describe_instances(
        Filters=[
            {'Name': 'tag-key', 'Values': ['backup', 'Backup']},
        ]
    ).get('Reservations', [])

    instances = [i for r in reservations for i in r['Instances']]

    print("Found %d instances that need evaluated" % len(instances))

    date_fmt = datetime.datetime.now().strftime('%Y-%m-%d')

    images_to_delete = []
    # Set to True once we confirm a backup was taken today.
    backup_success = False

    for instance in instances:
        image_count = 0
        for image in images:
            # The backup Lambda names its AMIs 'Lambda - <instance-id> From <ts>',
            # so this prefix identifies auto-created images for this instance.
            if not image.name.startswith('Lambda - ' + instance['InstanceId']):
                continue
            image_count += 1

            # Parse the DeleteOn tag; stays False when the tag is absent.
            delete_date = False
            try:
                if image.tags is not None:
                    deletion_date = [
                        t.get('Value') for t in image.tags
                        if t['Key'] == 'DeleteOn'][0]
                    delete_date = time.strptime(deletion_date, "%m-%d-%Y")
            except IndexError:
                delete_date = False

            today_date = time.strptime(
                datetime.datetime.now().strftime('%m-%d-%Y'), '%m-%d-%Y')
            # Queue images whose DeleteOn date is today or earlier.
            if delete_date and delete_date <= today_date:
                images_to_delete.append(image.id)

            # An AMI name ending with today's date means the latest backup ran.
            if image.name.endswith(date_fmt):
                backup_success = True
                print("Latest backup from " + date_fmt + " was a success")

        print("instance " + instance['InstanceId'] + " has " + str(image_count) + " AMIs")

    print("=============")
    print("About to process the following AMIs:")
    print(images_to_delete)

    if backup_success:
        snapshots = ec.describe_snapshots(MaxResults=1000, OwnerIds=['xxxxx'])['Snapshots']
        for image_id in images_to_delete:
            print("deregistering image %s" % image_id)
            ec.deregister_image(DryRun=False, ImageId=image_id)
            # Delete every snapshot whose description references this AMI.
            # 'in' replaces the original find(...) > 0, which missed a match
            # at index 0.
            for snapshot in snapshots:
                if image_id in snapshot['Description']:
                    ec.delete_snapshot(SnapshotId=snapshot['SnapshotId'])
                    print("Deleting snapshot " + snapshot['SnapshotId'])
            print("-------------")
    else:
        print("No current backup found. Termination suspended.")
# Upgrade awscli: the flag is '--upgrade'; pip has no '--update' option and
# the original command simply errors out.
sudo pip install --upgrade awscli
# Append (>>) the AWS environment to ~/.bashrc — the original used '>', which
# would TRUNCATE the user's entire .bashrc.
cat >> ~/.bashrc << END
export AWS_ACCESS_KEY_ID={key_id}
export AWS_SECRET_ACCESS_KEY={secret_key}
export AWS_DEFAULT_REGION={region}
export AWS_DEFAULT_OUTPUT={output_type}
END
# Java runtime required by the legacy (pre-awscli) EC2 command-line tools.
sudo apt-get install default-jdk
sudo sed -i '1iJAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64' /etc/environment
# Credentials file consumed by the legacy Java tools.
cat > ~/.credential-file << END
AWSAccessKeyId=xxx
AWSSecretKey=xxx
END
chmod 600 ~/.credential-file
echo 'export AWS_CREDENTIAL_FILE=~/.credential-file' >> ~/.bashrc
# EC2 API tools.  NOTE(review): the wget saves into the current directory but
# the unzip reads ~/Downloads — run the wget from ~/Downloads (or adjust).
wget http://s3.amazonaws.com/ec2-downloads/ec2-api-tools.zip
cd /opt
sudo unzip ~/Downloads/ec2-api-tools.zip
sudo sed -i '1iEC2_HOME=/opt/ec2-api-tools' /etc/environment
sudo sed -i 's/^PATH="\(.*\)/PATH="${EC2_HOME}:${EC2_HOME}\/bin:\1/' /etc/environment
# Elastic Load Balancing tools.
wget http://ec2-downloads.s3.amazonaws.com/ElasticLoadBalancing.zip
unzip ElasticLoadBalancing.zip
sudo mv ElasticLoadBalancing /opt/
sudo sed -i '1iAWS_ELB_HOME=/opt/ElasticLoadBalancing' /etc/environment
sudo sed -i 's/^PATH="\(.*\)/PATH="${AWS_ELB_HOME}:${AWS_ELB_HOME}\/bin:\1/' /etc/environment
# Auto Scaling tools.
wget http://ec2-downloads.s3.amazonaws.com/AutoScaling.zip
unzip AutoScaling.zip
sudo mv AutoScaling /opt/
sudo sed -i '1iAWS_AUTO_SCALING_HOME=/opt/AutoScaling' /etc/environment
sudo sed -i 's/^PATH="\(.*\)/PATH="${AWS_AUTO_SCALING_HOME}:${AWS_AUTO_SCALING_HOME}\/bin:\1/' /etc/environment
# CloudWatch tools.  Fixed: the original wrote to '/etc/environmenty' (typo),
# so AWS_CLOUDWATCH_HOME was never actually set.
wget http://ec2-downloads.s3.amazonaws.com/CloudWatch.zip
unzip CloudWatch.zip
sudo mv CloudWatch /opt/
sudo sed -i '1iAWS_CLOUDWATCH_HOME=/opt/CloudWatch' /etc/environment
sudo sed -i 's/^PATH="\(.*\)/PATH="${AWS_CLOUDWATCH_HOME}:${AWS_CLOUDWATCH_HOME}\/bin:\1/' /etc/environment
# IAM CLI tools.
wget http://ec2-downloads.s3.amazonaws.com/IAMCli.zip
unzip IAMCli.zip
sudo mv IAMCli /opt/
sudo sed -i '1iAWS_IAM_HOME=/opt/IAMCli' /etc/environment
sudo sed -i 's#^PATH="\(.*\)#PATH="${AWS_IAM_HOME}:${AWS_IAM_HOME}\/bin:\1#' /etc/environment