Deploying an application to AWS ECS with S3 integration and IAM policies/roles using Terraform

In this post I’ll share a simple Node.js application with AWS S3 connectivity and the Terraform configuration files I used to provision the architecture in AWS ECS. I included S3 integration in this guide to show how IAM policies can be used with ECS tasks via Terraform.

Part 1: The Node.js app and Docker image

I started by creating a simple Node.js/Express app. To demonstrate S3 connectivity, it writes a text file containing the current date to an S3 bucket on application start, and subsequent HTTP requests fetch the file and return its contents. New file: express/app.js

const express = require('express')
const app = express()
const app_port = 80

// initialize AWS S3 connection and push the current date to a text file
const AWS = require('aws-sdk')
const s3 = new AWS.S3()
const s3_bucket = process.env.S3_DATA_BUCKET
const s3_put_params = {
  Body: (new Date()).toISOString(),
  Bucket: s3_bucket,
  Key: 'app_start.txt'
}
s3.putObject(s3_put_params, function(err, data){
  if (err) console.log(err);
  else console.log(data);
})
const s3_get_params = {
  Bucket: s3_bucket,
  Key: 'app_start.txt'
}

// fetch the file from S3 and return its contents on each request
app.get('/', function (req, res) {
  s3.getObject(s3_get_params, function(err, data){
    if (err) res.status(err.statusCode).send(err.message)
    else res.send(data.Body.toString())
  })
})

app.listen(app_port, function () {
  console.log('App starting on port: ' + app_port)
})
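
The accompanying package.json isn't shown; the app only needs the express and aws-sdk (v2) packages. A minimal sketch for generating it (version pins omitted; older releases may be required for the Node 6 base image used below):

# generate a package.json and record the two runtime dependencies
npm init -y
npm install --save express aws-sdk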

I created a simple Dockerfile that builds on the official Node.js image, copies the app.js and package.json files, and installs the npm packages. New file: express/Dockerfile

FROM node:6.11.1

ENV APP_HOME /express
WORKDIR $APP_HOME

COPY app.js $APP_HOME
COPY package.json $APP_HOME

RUN npm install
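
To sanity-check the image before involving ECS, it can be built and run locally. A sketch, where my-test-bucket is a placeholder for any bucket the host credentials can write to:

# build the image and run the app locally, mapping container port 80 to host port 8080
# AWS credentials are passed through from the host environment for this local check only
docker build -t express-test .
docker run --rm -p 8080:80 \
  -e S3_DATA_BUCKET=my-test-bucket \
  -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_REGION \
  express-test node app.js

# in another terminal
curl http://localhost:8080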

Next I added a shell script to build the Node.js image, tag it, and push the image to Amazon ECR. New file: express/build.sh

#!/bin/bash

set -e

cd "$(dirname "$0")"
source ../terraform/.env

AWS_ACCOUNT_ID=$(aws sts get-caller-identity --output text --query 'Account' --profile $AWS_PROFILE)

# ensure ECR repository exists
existing_repo=$(aws --profile $AWS_PROFILE ecr describe-repositories --query "repositories[?repositoryName=='${ECR_REPO}'].repositoryName" --output text)
if [ -z "$existing_repo" ]; then
  aws --profile $AWS_PROFILE ecr create-repository --repository-name $ECR_REPO
fi

# build, tag, push to AWS ECR
docker_login=$(aws ecr get-login --no-include-email --region $AWS_REGION)
eval $docker_login
docker build -t $ECR_REPO .
docker tag $ECR_REPO:latest $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECR_REPO:latest
docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECR_REPO:latest
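
Note that aws ecr get-login only exists in AWS CLI v1; with AWS CLI v2 the equivalent login step would look something like this:

# AWS CLI v2 replacement for the get-login/eval lines above
aws ecr get-login-password --region $AWS_REGION --profile $AWS_PROFILE \
  | docker login --username AWS --password-stdin \
    $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com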

Both the script above and the Terraform provisioning script below load environment variables from a “.env” file. Example file: terraform/.env

AWS_KEY_NAME=
AWS_PROFILE=default
AWS_REGION=us-east-1
AWS_SECURITY_GROUP_IDS='["sg-########"]'
AWS_SUBNET_ID=subnet-########
EC2_AMI_ID=ami-04351e12
EC2_INSTANCE_TYPE=m4.large
ECR_REPO=eric-test/express
S3_DATA_BUCKET=eric-express-data
TF_STATE_BUCKET=eric-terraform-state
TF_STATE_KEY=terraform/ecs.tfstate

Part 2: Terraform

The first file I added is used to configure the Terraform backend to store state in S3. I left the values empty since they are loaded from the “.env” file and passed to Terraform via terraform init. New file: terraform/backend.tf

terraform {
  # Stores the Terraform state in S3
  # https://www.terraform.io/docs/backends/types/s3.html
  backend "s3" {
    bucket  = ""
    key     = ""
    profile = ""
    region  = ""
  }
}

I added a variables file; the values are supplied from “.env” via -var flags in the provisioning script below. It also contains the AWS provider configuration. New file: terraform/vars.tf

provider "aws" {
  region = "${var.aws_region}"
  profile = "${var.aws_profile}"
  max_retries = "10"
}

variable "aws_key_name" {
  type = "string"
}

variable "aws_profile" {
  type = "string"
}

variable "aws_region" {
  type = "string"
  default = "us-east-1"
}

variable "aws_security_group_ids" {
  type = "list"
}

variable "aws_subnet_id" {
  type = "string"
}

variable "ec2_ami_id" {
  type = "string"
}

variable "ec2_instance_type" {
  type = "string"
}

variable "express_ecr_image" {
  type = "string"
}

variable "s3_data_bucket" {
  type = "string"
}

Next I added a file for the S3-specific configuration. It provisions an S3 bucket and an IAM policy defining the S3 permissions used by the ECS application role, and attaches that policy to the role (defined in ecs.tf below). New file: terraform/s3.tf

# AWS S3 bucket
# TF: https://www.terraform.io/docs/providers/aws/r/s3_bucket.html
# AWS: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html
resource "aws_s3_bucket" "s3_data_bucket" {
  bucket = "${var.s3_data_bucket}"
  acl    = "private"

  tags {
    Name = "Eric Data Bucket"
  }
}

# [Data] IAM policy to define S3 permissions
# TF: https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html
# AWS: http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/iam/create-policy.html
data "aws_iam_policy_document" "s3_data_bucket_policy" {
  statement {
    sid = ""
    effect = "Allow"
    actions = [
      "s3:ListBucket"
    ]
    resources = [
      "arn:aws:s3:::${aws_s3_bucket.s3_data_bucket.bucket}"
    ]
  }
  statement {
    sid = ""
    effect = "Allow"
    actions = [
      "s3:DeleteObject",
      "s3:GetObject",
      "s3:PutObject",
      "s3:PutObjectAcl"
    ]
    resources = [
      "arn:aws:s3:::${aws_s3_bucket.s3_data_bucket.bucket}/*"
    ]
  }
}

# AWS IAM policy
# TF: https://www.terraform.io/docs/providers/aws/r/iam_policy.html
# AWS: http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/iam/create-policy.html
resource "aws_iam_policy" "s3_policy" {
  name   = "eric-s3-policy"
  policy = "${data.aws_iam_policy_document.s3_data_bucket_policy.json}"
}

# Attaches a managed IAM policy to an IAM role
# TF: https://www.terraform.io/docs/providers/aws/r/iam_role_policy_attachment.html
# AWS: http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html
resource "aws_iam_role_policy_attachment" "ecs_role_s3_data_bucket_policy_attach" {
  role       = "${aws_iam_role.ecs_role.name}"
  policy_arn = "${aws_iam_policy.s3_policy.arn}"
}
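
Once everything is applied, the attachment can be sanity-checked from the CLI. A quick sketch using the names above and the eric-ecs-role role defined in ecs.tf below (with AWS_ACCOUNT_ID set as in the shell scripts, and assuming the policy is still at its initial v1 version):

# list the managed policies attached to the ECS task role
aws --profile $AWS_PROFILE iam list-attached-role-policies --role-name eric-ecs-role

# inspect the rendered S3 policy document
aws --profile $AWS_PROFILE iam get-policy-version \
  --policy-arn arn:aws:iam::$AWS_ACCOUNT_ID:policy/eric-s3-policy \
  --version-id v1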

The following Terraform file defines the remainder of the ECS infrastructure: an ECS cluster, an IAM policy document that allows ECS tasks to assume a role, an IAM role, an ECS task definition (which uses the Node.js ECR image), an ECS service that manages the task, and an EC2 instance that joins the cluster. New file: terraform/ecs.tf

# AWS ECS cluster
# TF: https://www.terraform.io/docs/providers/aws/r/ecs_cluster.html
# AWS: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_clusters.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/ecs/create-cluster.html
resource "aws_ecs_cluster" "eric_express" {
  name = "eric-express"
}

# [Data] IAM policy document (to allow ECS tasks to assume a role)
# TF: https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html
# AWS: http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/iam/create-policy.html
data "aws_iam_policy_document" "ecs_assume_role_policy" {
  statement {
    sid = ""
    effect = "Allow"
    actions = [
      "sts:AssumeRole",
    ]
    principals {
      type = "Service"
      identifiers = ["ecs-tasks.amazonaws.com"]
    }
  }
}

# AWS IAM role (to allow ECS tasks to assume a role)
# TF: https://www.terraform.io/docs/providers/aws/r/iam_role.html
# AWS: http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/iam/create-role.html
resource "aws_iam_role" "ecs_role" {
  name               = "eric-ecs-role"
  assume_role_policy = "${data.aws_iam_policy_document.ecs_assume_role_policy.json}"
}

# [Data] AWS ECS task definition
# Simply specify the family to find the latest ACTIVE revision in that family.
# TF: https://www.terraform.io/docs/providers/aws/d/ecs_task_definition.html
# AWS: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/ecs/register-task-definition.html
data "aws_ecs_task_definition" "express_ecs_task_definition" {
  task_definition = "${aws_ecs_task_definition.express_ecs_task_definition.family}"
}

# AWS ECS task definition
# TF: https://www.terraform.io/docs/providers/aws/r/ecs_task_definition.html
# AWS: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/ecs/register-task-definition.html
resource "aws_ecs_task_definition" "express_ecs_task_definition" {
  family                = "eric-express"
  task_role_arn         = "${aws_iam_role.ecs_role.arn}"
  container_definitions = <<DEFINITION
[
  {
    "name": "express",
    "command": ["node", "app.js"],
    "essential": true,
    "image": "${var.express_ecr_image}",
    "memoryReservation": 128,
    "privileged": false,
    "portMappings": [
      {
        "hostPort": 80,
        "containerPort": 80,
        "protocol": "tcp"
      }
    ],
    "environment": [
      {
        "name": "S3_DATA_BUCKET",
        "value": "${aws_s3_bucket.s3_data_bucket.bucket}"
      }
    ]
  }
]
DEFINITION
}

# AWS ECS service
# TF: https://www.terraform.io/docs/providers/aws/r/ecs_service.html
# AWS: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/ecs/create-service.html
resource "aws_ecs_service" "express_ecs_service" {
  name            = "eric-express-ecs-service"
  cluster         = "${aws_ecs_cluster.eric_express.id}"
  desired_count   = 1
  task_definition = "${aws_ecs_task_definition.express_ecs_task_definition.family}:${max("${aws_ecs_task_definition.express_ecs_task_definition.revision}", "${data.aws_ecs_task_definition.express_ecs_task_definition.revision}")}"
}

# AWS EC2 instance
# TF: https://www.terraform.io/docs/providers/aws/r/instance.html
# AWS: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Instances.html
# AWS CLI: http://docs.aws.amazon.com/cli/latest/reference/ec2/run-instances.html
resource "aws_instance" "express_ec2" {
  ami                    = "${var.ec2_ami_id}"
  instance_type          = "${var.ec2_instance_type}"
  key_name               = "${var.aws_key_name}"
  subnet_id              = "${var.aws_subnet_id}"
  vpc_security_group_ids = "${var.aws_security_group_ids}"
  iam_instance_profile   = "ecsInstanceRole"
  tags {
    Name = "Eric Express ECS"
  }
  user_data = <<SCRIPT
#!/bin/bash
echo ECS_CLUSTER=${aws_ecs_cluster.eric_express.name} >> /etc/ecs/ecs.config
SCRIPT
}

output "PrivateIP" {
  value = "http://${aws_instance.express_ec2.private_ip}"
}

output "PublicIP" {
  value = "http://${aws_instance.express_ec2.public_ip}"
}
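
One assumption worth calling out: the EC2 instance references an existing ecsInstanceRole instance profile, which this configuration does not create (it is typically set up by the ECS console's first-run wizard or created by hand). A quick pre-flight check:

# verify the ecsInstanceRole instance profile exists before applying
aws --profile $AWS_PROFILE iam get-instance-profile --instance-profile-name ecsInstanceRole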

Last, I added a Terraform provisioning shell script. It loads and passes the environment variables, ensures the S3 state bucket exists, initializes the backend, and executes the given Terraform command. New file: terraform/provision.sh

#!/bin/bash

set -e

cd "$(dirname "$0")"
source .env

TF_COMMAND=$1

if [ -z "$TF_COMMAND" ]; then
  echo "Usage: ./provision.sh [plan|apply|destroy]"
  exit 1
fi

AWS_ACCOUNT_ID=$(aws sts get-caller-identity --output text --query 'Account' --profile $AWS_PROFILE)
EXPRESS_ECR_IMAGE="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO}"

# ensure terraform state bucket exists
existing_tf_bucket=$(aws --profile $AWS_PROFILE s3api list-buckets --query "Buckets[?Name=='${TF_STATE_BUCKET}'].Name" --output text)
if [ -z "$existing_tf_bucket" ]; then
  aws --profile $AWS_PROFILE s3api create-bucket --bucket $TF_STATE_BUCKET
fi

# init terraform & s3 backend
rm -f *.tfstate
rm -rf ./.terraform
terraform init \
  -force-copy \
  -backend=true \
  -backend-config "bucket=${TF_STATE_BUCKET}" \
  -backend-config "key=${TF_STATE_KEY}" \
  -backend-config "profile=${AWS_PROFILE}" \
  -backend-config "region=${AWS_REGION}"

# execute terraform
terraform $TF_COMMAND \
  -var "aws_key_name=${AWS_KEY_NAME}" \
  -var "aws_profile=${AWS_PROFILE}" \
  -var "aws_region=${AWS_REGION}" \
  -var "aws_security_group_ids=${AWS_SECURITY_GROUP_IDS}" \
  -var "aws_subnet_id=${AWS_SUBNET_ID}" \
  -var "ec2_ami_id=${EC2_AMI_ID}" \
  -var "ec2_instance_type=${EC2_INSTANCE_TYPE}" \
  -var "express_ecr_image=${EXPRESS_ECR_IMAGE}" \
  -var "s3_data_bucket=${S3_DATA_BUCKET}"

To provision the architecture, I populated the terraform/.env file and executed the following:

# build the ECR image
express/build.sh

# view the Terraform plan
terraform/provision.sh plan

# execute the Terraform plan
terraform/provision.sh apply

I then tested connectivity between S3 and the ECS Node.js task by curling the EC2 instance's IP:

$ curl http://ip-of-the-ec2-instance
2017-08-05T14:12:39.398Z%
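
Beyond the curl check, the running task and the object the app wrote can also be inspected with the AWS CLI. A sketch using the names from the configuration above:

# confirm the ECS service has a running task
aws --profile $AWS_PROFILE ecs list-tasks --cluster eric-express

# confirm the app wrote its start-up file to the data bucket
aws --profile $AWS_PROFILE s3 ls s3://$S3_DATA_BUCKET/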

Source code on GitHub
