Skip to content

Commit

Permalink
Configure deployment pipeline & DNS for superchallenge bot
Browse files · Browse the repository at this point in the history
  • Loading branch information
emk committed Aug 13, 2017
1 parent ac09a6b commit 7279eb5
Show file tree
Hide file tree
Showing 6 changed files with 144 additions and 1 deletion.
11 changes: 11 additions & 0 deletions dns.tf
Expand Up @@ -17,6 +17,17 @@ resource "aws_route53_record" "forum" {
records = ["${module.language_learners_server.public_ip}"]
}

# DNS "A" record for the Super Challenge bot, served from the shared
# language-learners server.
resource "aws_route53_record" "super_challenge" {
  zone_id = "${aws_route53_zone.primary.zone_id}"
  name    = "super-challenge"
  type    = "A"
  ttl     = "300"

  # Resolve to the server's Elastic IP address.
  records = ["${module.language_learners_server.public_ip}"]
}

# An "old-forum" record while we're migrating.
resource "aws_route53_record" "old-forum" {
zone_id = "${aws_route53_zone.primary.zone_id}"
Expand Down
Binary file modified ecs_deployer_lambda.zip
Binary file not shown.
6 changes: 5 additions & 1 deletion ecs_deployer_lambda/index.js
Expand Up @@ -60,8 +60,12 @@ exports.handler = (event, context, callback) => {
//console.log("taskDefinition:", taskDefinition);

// Figure out which containers need to be updated, and update them.
// This will only work for containers that are either:
//
// 1. Named after our service.
// 2. Have a name which starts with the service name plus "-".
for (const container of taskDefinition.containerDefinitions) {
if (container.name === ecsService) {
if (container.name === ecsService || container.name.indexOf(ecsService + "-") === 0) {
//console.log(container);
const newImage = container.image.replace(/:([^:]*)$/, ":" + imageTag);
console.log("Updating container to use new image:", container.name, newImage);
Expand Down
82 changes: 82 additions & 0 deletions superchallengebot-container-definitions.json
@@ -0,0 +1,82 @@
[
{
"volumesFrom": [],
"memory": 128,
"extraHosts": null,
"dnsServers": null,
"disableNetworking": null,
"dnsSearchDomains": null,
"portMappings": [],
"hostname": null,
"essential": true,
"entryPoint": null,
"mountPoints": [
{
"containerPath": "/var/www/html/config-files",
"sourceVolume": "config-files",
"readOnly": true
}
],
"name": "superchallengebot",
"ulimits": null,
"dockerSecurityOptions": null,
"environment": [
{
"name": "VIRTUAL_HOST",
"value": "super-challenge.language-learners.org"
},
{
"name": "LETSENCRYPT_HOST",
"value": "super-challenge.language-learners.org"
},
{
"name": "LETSENCRYPT_EMAIL",
"value": "letsencrypt@randomhacks.net"
}
],
"links": null,
"workingDirectory": null,
"readonlyRootFilesystem": null,
"image": "${image}",
"command": null,
"user": null,
"dockerLabels": null,
"logConfiguration": null,
"cpu": 0,
"privileged": null,
"memoryReservation": null
},
{
"volumesFrom": [],
"memory": 64,
"extraHosts": null,
"dnsServers": null,
"disableNetworking": null,
"dnsSearchDomains": null,
"portMappings": [],
"hostname": null,
"essential": true,
"entryPoint": null,
"mountPoints": [
{
"containerPath": "/var/www/html/config-files",
"sourceVolume": "config-files",
"readOnly": true
}
],
"name": "superchallengebot-cron",
"ulimits": null,
"dockerSecurityOptions": null,
"links": null,
"workingDirectory": null,
"readonlyRootFilesystem": null,
"image": "${image}",
"command": ["/var/www/html/worker"],
"user": null,
"dockerLabels": null,
"logConfiguration": null,
"cpu": 0,
"privileged": null,
"memoryReservation": null
}
]
46 changes: 46 additions & 0 deletions superchallengebot_pipeline.tf
@@ -0,0 +1,46 @@
# This defines the CodePipeline that builds and deploys our Super Challenge
# service.

# Build-and-deploy CodePipeline for the Super Challenge bot. All of the
# heavy lifting lives in the shared `github_ecs_pipeline` module.
module "superchallengebot_pipeline" {
  source        = "github_ecs_pipeline"
  name          = "superchallengebot"
  github_repo   = "superchallengebot"
  github_branch = "master"

  # The ECS task definition this pipeline deploys.
  taskdef_family   = "${aws_ecs_task_definition.superchallengebot.family}"
  taskdef_revision = "${aws_ecs_task_definition.superchallengebot.revision}"

  # Boilerplate parameters shared by every pipeline in this project.
  aws_region               = "${var.aws_region}"
  aws_account_id           = "${var.aws_account_id}"
  pipeline_role_arn        = "${aws_iam_role.codepipeline_role.arn}"
  build_role_arn           = "${aws_iam_role.codebuild_role.arn}"
  artifact_store_s3_bucket = "${aws_s3_bucket.codepipeline_artifacts.bucket}"
  ecs_cluster              = "${aws_ecs_cluster.language_learners.name}"
  notification_topic_arn   = "${aws_sns_topic.admin_updates.arn}"
}

# Load our container definitions from a template file.
#
# The ECR registry host is derived from the same `aws_account_id` and
# `aws_region` variables this file already passes to the pipeline module,
# rather than hard-coding "771600087445.dkr.ecr.us-east-1.amazonaws.com" —
# this keeps the image URL correct if the account or region ever changes.
data "template_file" "superchallengebot_container_definitions" {
  template = "${file("${path.module}/superchallengebot-container-definitions.json")}"

  vars {
    # Substituted for `${image}` in the JSON container definitions.
    image = "${var.aws_account_id}.dkr.ecr.${var.aws_region}.amazonaws.com/superchallengebot:latest"
  }
}

# ECS task definition consumed by the deployment pipeline. The `family`
# field here must match the module's `name` above.
resource "aws_ecs_task_definition" "superchallengebot" {
  family                = "superchallengebot"
  container_definitions = "${data.template_file.superchallengebot_container_definitions.rendered}"

  # Map a directory on our EBS-backed `/data` volume onto the volume name
  # referenced by `superchallengebot-container-definitions.json`.
  volume {
    name      = "config-files"
    host_path = "/data/superchallengebot/config-files"
  }
}
Binary file modified terraform.tfstate
Binary file not shown.

0 comments on commit 7279eb5

Please sign in to comment.