diff --git a/.dockerignore b/.dockerignore index 61bbb72..d6d3cea 100644 --- a/.dockerignore +++ b/.dockerignore @@ -16,3 +16,12 @@ # Operating system metafiles .DS_Store + +# Local build artifacts +node_modules/ +media/ +staticfiles/ + +# Deployment configuration +terraform/ +deploy.py diff --git a/.gitignore b/.gitignore index 1441253..825c31b 100644 --- a/.gitignore +++ b/.gitignore @@ -64,3 +64,6 @@ staticfiles/* static/webpack_bundles/ webpack-stats.json # END_FEATURE django_react + +.terraform.lock.hcl +.terraform/ diff --git a/Dockerfile b/Dockerfile index 0fe8706..3c08fde 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,13 @@ # START_FEATURE docker FROM python:3.11.4-slim-buster -WORKDIR /app +# reduces file creation +ENV PYTHONDONTWRITEBYTECODE=1 +# disables output buffering so logs are flushed to console +ENV PYTHONUNBUFFERED=1 -ADD requirements.txt /app/requirements.txt +# Set container working directory +WORKDIR /app RUN set -ex \ && buildDeps=" \ @@ -11,26 +15,48 @@ RUN set -ex \ libpq-dev \ " \ && deps=" \ + curl \ + vim \ + nano \ + procps \ postgresql-client \ " \ - && apt-get update && apt-get install -y $buildDeps $deps --no-install-recommends \ - && pip install --no-cache-dir -r /app/requirements.txt \ - && apt-get purge -y --auto-remove $buildDeps \ + && apt update && apt install -y $buildDeps $deps --no-install-recommends + + +# Install python dependencies +ADD requirements.txt /app/requirements.txt +RUN set -ex \ + && pip install --no-cache-dir -r /app/requirements.txt + +# Cleanup installs +RUN set -ex \ + && apt purge -y --auto-remove $buildDeps \ $(! command -v gpg > /dev/null || echo 'gnupg dirmngr') \ && rm -rf /var/lib/apt/lists/* + ENV VIRTUAL_ENV /env ENV PATH /env/bin:$PATH +# Copy project files into the container +COPY . 
/app/ +# Add temporary copy of env file to allow running management commands +COPY ./config/.env.build /app/config/.env + +# LTS Version of Node is 22 +ARG NODE_VERSION=22 + # START_FEATURE django_react -COPY ./nwb.config.js /app/nwb.config.js -COPY ./package.json /app/package.json -COPY ./package-lock.json /app/package-lock.json -RUN npm install +# if using nwb, nwb requires Node 16. TODO remove nwb +ARG NODE_VERSION=16 # END_FEATURE django_react -COPY . /app/ -COPY ./config/.env.example /app/config/.env +# install node +RUN curl -fsSL https://deb.nodesource.com/setup_{$NODE_VERSION}.x | bash - +RUN apt-get update && apt install nodejs -y + +RUN npm install # START_FEATURE django_react RUN ./node_modules/.bin/nwb build --no-vendor @@ -44,7 +70,7 @@ RUN python manage.py collectstatic --noinput RUN rm /app/config/.env -EXPOSE 8000 +EXPOSE 8080 -CMD ["gunicorn", "--bind", ":8000", "--workers", "3", "config.wsgi:application"] +CMD ["gunicorn", "--bind", ":8080", "--workers", "3", "config.wsgi:application", "--access-logfile", "-", "--error-logfile", "-"] # END_FEATURE docker diff --git a/config/.env.build b/config/.env.build new file mode 100644 index 0000000..56eae5f --- /dev/null +++ b/config/.env.build @@ -0,0 +1,11 @@ +# Env file for the purposes of building the docker image +ALLOWED_HOSTS= +SECRET_KEY= +DATABASE_URL=sqlite:///db.sqlite3 +GOOGLE_OAUTH2_KEY= +GOOGLE_OAUTH2_SECRET= +DEFAULT_FROM_EMAIL= +EC2_METADATA=False +AWS_SES_REGION_NAME= +AWS_SES_REGION_ENDPOINT= +AWS_STORAGE_BUCKET_NAME= diff --git a/config/settings.py b/config/settings.py index e0ed80d..e311c35 100644 --- a/config/settings.py +++ b/config/settings.py @@ -14,7 +14,6 @@ import environ - env = environ.Env( # Sets Django's ALLOWED_HOSTS setting ALLOWED_HOSTS=(list, []), @@ -48,6 +47,8 @@ # Set to True to enable the Django Debug Toolbar DEBUG_TOOLBAR=(bool, False), # END_FEATURE debug_toolbar + + EC2_METADATA=(bool, True) ) # If ALLWED_HOSTS has been configured, then we're running on a server and 
"""Deployment script for ECS-hosted environments.

Builds the application docker image, pushes it to the environment's ECR
repository, runs database migrations as a one-off ECS task, and then
redeploys the ECS web service. An ``ssh`` subcommand opens an interactive
shell in a running task via ECS Exec.

Usage:
    python deploy.py [options] <env>
    python deploy.py <env> ssh
"""
import argparse
import logging
import subprocess
import sys
import time

import boto3.session

AWS_REGION = "us-east-1"
AWS_PROFILE_NAME = "FILL ME IN"

MIGRATION_TIMEOUT_SECONDS = 10 * 60  # Ten minutes
STATUS_CHECK_INTERVAL = 30  # Seconds between ECS status polls

# Image manifest media types accepted when retrieving images from ECR.
# Covers both Docker v2 and OCI manifests. (The previous value, ["string"],
# was the boto3 documentation's type placeholder, not a real media type.)
ECR_MANIFEST_MEDIA_TYPES = [
    "application/vnd.docker.distribution.manifest.v2+json",
    "application/vnd.oci.image.manifest.v1+json",
]


class MigrationFailed(Exception):
    """The migration ECS task stopped with a failure code."""
    pass


class MigrationTimeOut(Exception):
    """The migration ECS task did not finish within MIGRATION_TIMEOUT_SECONDS."""
    pass


def deploy(args):
    """Run the full deployment flow for the environment in ``args.env``.

    Steps: confirm with the user (unless --no-input), refresh terraform
    state, build or copy the ECR image, run database migrations, and
    force a new deployment of the web service.
    """
    if not args.no_input:
        # Request user confirmation
        confirmation_message = f"\nBegin deploying to {args.env}? "
        if args.use_image_from_env:
            confirmation_message += f"This will use the current {args.use_image_from_env} deployment. "
        confirmation_message += "(y/n): "
        confirmation = input(confirmation_message)
        if confirmation.lower() not in ["y", "yes"]:
            logging.warning("Deployment canceled.")
            return

    # Local state setup for relevant envs
    terraform_envs = [args.env]
    if args.use_image_from_env:
        terraform_envs.append(args.use_image_from_env)
    setup(terraform_envs)

    # ECR image setup. setup() above has already refreshed terraform state
    # for use_image_from_env, so no additional refresh is needed here.
    if args.use_image_from_env:
        copy_image_from_env(args.use_image_from_env, args.env)
    elif args.use_latest:
        copy_latest_image(args.env)
    elif not args.skip_build:
        build_and_push_image(args.env)
    else:
        logging.info("Skipping build step")

    if args.skip_migration:
        logging.info("Skipping database migration")
    else:
        # Run and wait for migrations
        run_migrations(args.env)

    # Redeploy services
    restart_web_service(args.env)


def setup(envs):
    """Refresh local terraform state for each environment in ``envs``."""
    logging.info("Refreshing terraform state...")
    for env in envs:
        subprocess.run(["terraform", "refresh"], cwd=f"terraform/envs/{env}", check=True, capture_output=True)


def subprocess_output(command_args, **subprocess_kwargs):
    """Run a command and return its stdout, stripped of newlines and quotes.

    Raises subprocess.CalledProcessError if the command exits non-zero.
    """
    output = subprocess.run(command_args, **subprocess_kwargs, capture_output=True, check=True)
    return output.stdout.decode('utf-8').strip("\n").strip('"')


def get_terraform_output(output_key, env):
    """Return the terraform output value ``output_key`` for ``env``."""
    return subprocess_output(["terraform", "output", output_key], cwd=f"terraform/envs/{env}")


def build_and_push_image(env):
    """Build the docker image locally and push it to the env's ECR repository."""
    # Build and tag image
    ecr_repository_name = get_terraform_output("ecr_repository_name", env)
    ecr_image_uri = get_terraform_output("ecr_image_uri", env)
    logging.info("Building docker image...")
    build_command = ["docker", "build", "-t", ecr_repository_name, "."]
    subprocess.run(build_command, check=True)
    subprocess.run(["docker", "tag", f"{ecr_repository_name}:latest", ecr_image_uri], check=True)

    # Push image to ECR
    logging.info("Logging in to ECR...")
    password_command = ["aws", "ecr", "get-login-password", "--region", AWS_REGION, "--profile", AWS_PROFILE_NAME]
    password = subprocess_output(password_command)
    docker_login_command = ["docker", "login", "--username", "AWS", "--password-stdin", ecr_image_uri.split("/")[0]]
    subprocess.run(docker_login_command, input=password, text=True, check=True)
    logging.info("Pushing docker image to ECR...")
    subprocess.run(["docker", "push", ecr_image_uri], check=True)

    # Remove unused docker images to preserve local disk space.
    # Best-effort: deliberately no check=True, a prune failure should not
    # abort the deployment.
    subprocess.run(["docker", "image", "prune", "-f"])


def copy_image_from_env(from_env, to_env):
    """Retag the image currently deployed in ``from_env`` for use by ``to_env``."""
    logging.info(f"Copying image from {from_env} to {to_env}")
    from_ecr_repository_name = get_terraform_output("ecr_repository_name", from_env)
    to_ecr_repository_name = get_terraform_output("ecr_repository_name", to_env)
    retag_image(from_ecr_repository_name, from_env, to_ecr_repository_name, to_env)


def copy_latest_image(env):
    """Retag the image tagged ``latest`` in the repository for use by ``env``."""
    logging.info(f"Copying latest image to {env}")
    ecr_repository_name = get_terraform_output("ecr_repository_name", env)
    retag_image(ecr_repository_name, "latest", ecr_repository_name, env)


def retag_image(from_repository, from_tag, to_repository, to_tag):
    """Copy an image tag within/between ECR repositories by re-pushing its
    manifest. No image layers are transferred; this only adds a tag.
    """
    ecr_client = boto3.session.Session(profile_name=AWS_PROFILE_NAME, region_name=AWS_REGION).client("ecr")
    # Get image manifest, accepting both Docker v2 and OCI manifest formats
    image_response = ecr_client.batch_get_image(
        repositoryName=from_repository,
        imageIds=[{
            "imageTag": from_tag
        }],
        acceptedMediaTypes=ECR_MANIFEST_MEDIA_TYPES
    )
    image_manifest = image_response["images"][0]["imageManifest"]
    image_manifest_media_type = image_response["images"][0]["imageManifestMediaType"]
    # Add new tag to manifest
    ecr_client.put_image(
        repositoryName=to_repository,
        imageManifest=image_manifest,
        imageTag=to_tag,
        imageManifestMediaType=image_manifest_media_type,
    )


def run_migrations(env):
    """Run `manage.py migrate` as a one-off ECS task and wait for it to finish.

    Uses the web server task definition with an overridden command. Raises
    MigrationFailed if the task stops unsuccessfully, MigrationTimeOut if it
    does not finish within MIGRATION_TIMEOUT_SECONDS.
    """
    cluster_id = get_terraform_output("cluster_id", env)
    ecs_client = boto3.session.Session(profile_name=AWS_PROFILE_NAME, region_name=AWS_REGION).client("ecs")
    logging.info("Starting migrations...")
    run_task_response = ecs_client.run_task(
        taskDefinition=get_terraform_output("web_task_definition_arn", env),
        networkConfiguration={
            "awsvpcConfiguration" : {
                "subnets": [get_terraform_output("web_network_configuration_subnet", env)],
                "securityGroups": [get_terraform_output("web_network_configuration_security_group", env)],
                "assignPublicIp": "ENABLED"
            }
        },
        cluster=cluster_id,
        capacityProviderStrategy=[{'capacityProvider': 'FARGATE'}],
        overrides={
            "containerOverrides": [{
                "name": get_terraform_output("web_service_name", env),
                "command": ["python", "manage.py", "migrate", "--no-input"]
            }]
        }
    )

    # Wait for task to complete. Times out after MIGRATION_TIMEOUT_SECONDS
    migration_task_id = run_task_response["tasks"][0]["taskArn"]
    logging.info(f"Migration task provisioned with ID {migration_task_id}")
    start = time.time()

    while time.time() - start < MIGRATION_TIMEOUT_SECONDS:
        logging.info("Waiting for migrations to finish...")
        describe_tasks_response = ecs_client.describe_tasks(cluster=cluster_id, tasks=[migration_task_id])
        task = describe_tasks_response["tasks"][0]
        stop_code = task.get("stopCode")
        if not stop_code:
            time.sleep(STATUS_CHECK_INTERVAL)
            continue
        if stop_code == "EssentialContainerExited":
            # The migration task has finished successfully
            logging.info("Migration complete")
            return
        logging.error(
            f"Migration task failed with code {stop_code} and reason {task.get('stoppedReason')}. "
            f"Check log stream for more info: {cloudwatch_log_url(env)}"
        )
        raise MigrationFailed()
    logging.error(
        f"Migration timed out. It may still be running. Check log stream for more info: {cloudwatch_log_url(env)}"
    )
    raise MigrationTimeOut()


def restart_web_service(env):
    """Force a new deployment of the ECS web service and wait for rollout."""
    ecs_client = boto3.session.Session(profile_name=AWS_PROFILE_NAME, region_name=AWS_REGION).client("ecs")
    logging.info("Redeploying web service...")
    cluster_id = get_terraform_output("cluster_id", env)
    service_name = get_terraform_output("web_service_name", env)
    ecs_client.update_service(
        cluster=cluster_id,
        service=service_name,
        forceNewDeployment=True
    )

    while True:
        logging.info("Waiting for deployment to finish...")
        services_response = ecs_client.describe_services(cluster=cluster_id, services=[service_name])
        deployments = services_response["services"][0]["deployments"]
        new_deployment = next(deployment for deployment in deployments if deployment["status"] == "PRIMARY")
        deployment_state = new_deployment["rolloutState"]
        if deployment_state == "IN_PROGRESS":
            time.sleep(STATUS_CHECK_INTERVAL)
            continue
        if deployment_state == "COMPLETED":
            logging.info("Success! Deployment complete.")
        elif deployment_state == "FAILED":
            logging.error(
                f"Deployment failed! Reason: {new_deployment['rolloutStateReason']}. "
                f"Check log stream for more info: {cloudwatch_log_url(env)}"
            )
        else:
            logging.warning(f"Unknown deployment state {deployment_state}. Please check the ECS console.")
        break


def cloudwatch_log_url(env):
    """Return the CloudWatch console URL for the env's web task log group."""
    cloudwatch_log_group_name = get_terraform_output("cloudwatch_log_group_name", env)
    return f"https://{AWS_REGION}.console.aws.amazon.com/cloudwatch/home?region={AWS_REGION}#logsV2:log-groups/log-group/{cloudwatch_log_group_name}"


def ssh(args):
    """Open an interactive bash shell in a running task for ``args.env``.

    Note this may attach to a short-lived task (e.g. a migration task).
    """
    # Refresh terraform state
    setup([args.env])
    cluster_id = get_terraform_output("cluster_id", args.env)
    service_name = get_terraform_output("web_service_name", args.env)
    ecs_client = boto3.session.Session(profile_name=AWS_PROFILE_NAME, region_name=AWS_REGION).client("ecs")
    list_tasks_resp = ecs_client.list_tasks(cluster=cluster_id, serviceName=service_name)
    task_ids = list_tasks_resp["taskArns"]

    if not task_ids:
        logging.error("No running tasks found for service %s; cannot open a shell.", service_name)
        return
    task_id = task_ids[0]
    # The argv list is passed without a shell, so the command must not be
    # wrapped in extra quotes ("'/bin/bash'" would be sent literally and fail).
    bash_command = ["aws", "ecs", "execute-command", "--cluster", cluster_id, "--task", task_id,
                    "--region", AWS_REGION, "--profile", AWS_PROFILE_NAME, "--interactive",
                    "--command", "/bin/bash"]
    subprocess.run(bash_command)


def main():
    """Parse command-line arguments and dispatch to deploy() or ssh()."""
    parser = argparse.ArgumentParser(prog="python deploy.py")
    parser.add_argument("--no-input", action="store_true",
                        help="Skips request for confirmation before starting the deploy.")
    parser.add_argument("--skip-build", action="store_true",
                        help="Skips the build step and uses the existing ECR image for the environment.")
    parser.add_argument("--use-latest", action="store_true",
                        help="Skips the build step and uses the ECR image tagged `latest`.")
    parser.add_argument("--use-image-from-env",
                        help="If provided, skips the terraform build and instead uses the existing built image from the specified environment")
    parser.add_argument("--skip-migration", action="store_true", help="Skips the migration step.")
    parser.add_argument("env", help="Terraform environment to deploy")
    parser.set_defaults(func=deploy)

    subparsers = parser.add_subparsers(title="Extra utilities", prog="python deploy.py <env>")
    ssh_parser = subparsers.add_parser("ssh", help="SSH into running container in env instead of deploying")
    ssh_parser.set_defaults(func=ssh)

    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(levelname)s - %(message)s")
    args.func(args)


if __name__ == "__main__":
    main()
version: '3'

services:
  web:
    build: .
    # Collect static assets, wait briefly for the db, apply migrations,
    # then serve with the Django development server on port 8001.
    command: >
      /bin/bash -c "
      python manage.py collectstatic --noinput &&
      sleep 2 &&
      python manage.py migrate --noinput &&
      python manage.py runserver 0.0.0.0:8001
      "
    volumes:
      - .:/app
    ports:
      - "8001:8001"
    environment:
      # Values are quoted because the Compose spec requires environment
      # mapping values to be strings; unquoted True/False parse as YAML
      # booleans and are rejected or mis-coerced by some compose versions.
      DEBUG: "True"
      LOCALHOST: "True"
      DATABASE_URL: postgres://dbuser:dbpassword@db:5432/dbname
      SECRET_KEY: 'supersecret'
      AWS_STORAGE_BUCKET_NAME: ""
      DEFAULT_FROM_EMAIL: "example@example.com"
      ALLOWED_HOSTS: 'localhost,127.0.0.1'
      GOOGLE_OAUTH2_KEY: ""
      GOOGLE_OAUTH2_SECRET: ""
      EC2_METADATA: "False"
      SENTRY_DSN: ""
    depends_on:
      - db

  db:
    image: postgres:16
    environment:
      POSTGRES_DB: dbname
      POSTGRES_USER: dbuser
      POSTGRES_PASSWORD: dbpassword
    ports:
      - "5432:5432"
    volumes:
      # Persist database data across container restarts
      - postgres_data:/var/lib/postgresql/data

volumes:
  postgres_data:
@@ -121,12 +121,57 @@ DEFAULT_FROM_EMAIL Following that, deploy your code to the environment (see below). + + +## Creating a new ECS environment + +In the following steps, config variables go in `terraform/envs//main.tf`; most of them go in the definition of the `ecs_deployment` module. + +1. Create a VPC and subnets (or use the default VPC). This is config var `ecs_deployment.vpc_id`. +1. Create an ECR repository. This is config var `ecs_deployment.ecr_repository_name`. +2. Build and push an initial docker file to it (ECR provides docker commands for this) and tag it with a tag called ``. +3. Create a bucket for holding terraform config. This is config var `terraform.backend.bucket`. +4. Create an SES identity and from email (if using SES). The from email is config var `ecs_deployment.ses_from_email`. +5. Create an AWS certificate manager certificate for your domain. This is config var `ecs_deployment.certificate_manager_arn`. +6. Create a secrets manager secret containing the config parameters needed by the application (you do not need to include "DATABASE_URL", "SECRET_KEY", "AWS_STORAGE_BUCKET_NAME", or "DEFAULT_FROM_EMAIL" as those are managed by terraform in `terraform/modules/ecs_deployment/secrets_manager.tf`). This is config var `ecs_deployment.web_config_secret_name`. +7. Fill in the remaining config vars: + * `ecs_deployment.application_name` with a name for your application. + * `ecs_deployment.rds_engine_version` with the version of Postgres you want to use. + * `ecs_deployment.s3_bucket_prefix` with a prefix for your s3 bucket so that it will have a globally unique name (the bucket will be named `_`). +8. Run terraform to set up that environment: +``` +cd terraform/envs/ +terraform init +terraform plan +terraform apply +``` + +9. Redeploy your code using the steps described below (with the --use-latest option) to run initial migrations +10.
Add a DNS entry from your domain name to the created load balancer + + ## Deploying code -To deploy new versions of your code to your environment, run `eb deploy ` using the EB CLI to deploy your code to that environment. +### Elastic Beanstalk +To deploy new versions of your code to an elastic beanstalk environment, run `eb deploy ` using the EB CLI to deploy your code to that environment. See the [eb-cli](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3.html) docs on general command line usage of Elastic Beanstalk. +### ECS +To deploy new versions of your code to an ECS environment, use the included `deploy.py` script. First fill in the +missing constants at the top of that file, and then run the script: +``` +python deploy.py +``` +This script will do the following: +1. Build the docker image using your local code version. +2. Push the docker image to the ECR location for the specified environment +3. Run database migrations +4. Deploy to the running web service + +Run `python deploy.py --help` to see available options. You may choose to use an existing ECR image or skip migrations. + ## SSH -To SSH into an Elastic Beanstalk Environment, use [eb-ssm](https://github.com/zagaran/eb-ssm). \ No newline at end of file +To SSH into an Elastic Beanstalk Environment, use [eb-ssm](https://github.com/zagaran/eb-ssm). 
+ +To SSH into an ECS environment, use `python deploy.py -env ssh` diff --git a/requirements-dev.txt b/requirements-dev.txt index c44efe1..757130e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -322,6 +322,10 @@ werkzeug==3.0.1 # via -r requirements-dev.in wheel==0.41.3 # via pip-tools +whitenoise==6.8.2 + # via + # -c requirements.txt + # -r requirements.in # The following packages are considered to be unsafe in a requirements file: # pip diff --git a/requirements.in b/requirements.in index 6fe690f..e7ad4e9 100644 --- a/requirements.in +++ b/requirements.in @@ -33,6 +33,7 @@ django-storages # START_FEATURE docker gunicorn +whitenoise # END_FEATURE docker # START_FEATURE django_ses diff --git a/requirements.txt b/requirements.txt index 5584a69..1d6925a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -157,3 +157,5 @@ urllib3==2.0.7 # sentry-sdk wcwidth==0.2.9 # via prompt-toolkit +whitenoise==6.8.2 + # via -r requirements.in diff --git a/terraform/.terraform/modules/modules.json b/terraform/.terraform/modules/modules.json new file mode 100644 index 0000000..9a91210 --- /dev/null +++ b/terraform/.terraform/modules/modules.json @@ -0,0 +1 @@ +{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"ecs_deployment","Source":"./modules/ecs_deployment","Dir":"modules/ecs_deployment"}]} \ No newline at end of file diff --git a/terraform/envs/production/main.tf b/terraform/envs/production/main.tf new file mode 100644 index 0000000..0b02536 --- /dev/null +++ b/terraform/envs/production/main.tf @@ -0,0 +1,86 @@ +terraform { + backend "s3" { + bucket = "" # TODO: FILL ME IN + key = "production.tfstate" + region = "us-east-1" # TODO: FILL ME IN + profile = "" # TODO: FILL ME IN + } + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>5.59" + } + } +} + +provider "aws" { + region = "us-east-1" # TODO: FILL ME IN + profile = "" # TODO: FILL ME IN +} + +module "ecs_deployment" { + source = "../../modules/ecs_deployment" + + # Required 
Variables + environment_name = "production" + application_name = "" # TODO: FILL ME IN + vpc_id = "" # TODO: FILL ME IN + web_config_secret_name = "" # TODO: FILL ME IN + s3_bucket_prefix = "" # TODO: FILL ME IN + rds_engine_version = "" # TODO: FILL ME IN + ses_from_email = "" # TODO: FILL ME IN + certificate_manager_arn = "" # TODO: FILL ME IN + ecr_repository_name = "" # TODO: FILL ME IN + + # Optional Variables + rds_backup_retention_period = 30 + rds_deletion_protection = true + rds_instance_class = "db.m7g.large" + rds_multi_az = true + container_web_cpu = 1024 + container_web_memory = 1024 + container_count = 2 + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-Res-FIPS-2023-04" +} + +output "cluster_id" { + description = "The ID of the ECS cluster" + value = module.ecs_deployment.cluster_id +} + +output "cloudwatch_log_group_name" { + description = "The name of the cloudwatch log group for the web service task" + value = module.ecs_deployment.cloudwatch_log_group_name +} + +output "ecr_repository_name" { + description = "The name of the ECR repository" + value = module.ecs_deployment.ecr_repository_name +} + +output "public_ip" { + description = "The public IP address of the load balancer for the web service" + value = module.ecs_deployment.public_ip +} + +output "web_service_name" { + description = "The name of the ECS container running the web service" + value = module.ecs_deployment.web_service_name +} + +output "web_network_configuration_security_group" { + description = "The security groups used by the ECS web task" + value = tolist(module.ecs_deployment.web_network_configuration_security_groups)[0] + +} + +output "web_network_configuration_subnet" { + description = "The ID of one the subnets used by the web task" + value = tolist(module.ecs_deployment.web_network_configuration_subnets)[0] +} + +output "web_task_definition_arn" { + description = "The ARN of the ECS web service task definition" + value = module.ecs_deployment.web_task_definition_arn +} diff 
--git a/terraform/envs/staging/main.tf b/terraform/envs/staging/main.tf new file mode 100644 index 0000000..c25f894 --- /dev/null +++ b/terraform/envs/staging/main.tf @@ -0,0 +1,86 @@ +terraform { + backend "s3" { + bucket = "" # TODO: FILL ME IN + key = "staging.tfstate" + region = "us-east-1" # TODO: FILL ME IN + profile = "" # TODO: FILL ME IN + } + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>5.59" + } + } +} + +provider "aws" { + region = "us-east-1" # TODO: FILL ME IN + profile = "" # TODO: FILL ME IN +} + +module "ecs_deployment" { + source = "../../modules/ecs_deployment" + + # Required Variables + environment_name = "staging" + application_name = "" # TODO: FILL ME IN + vpc_id = "" # TODO: FILL ME IN + web_config_secret_name = "" # TODO: FILL ME IN + s3_bucket_prefix = "" # TODO: FILL ME IN + rds_engine_version = "" # TODO: FILL ME IN + ses_from_email = "" # TODO: FILL ME IN + certificate_manager_arn = "" # TODO: FILL ME IN + ecr_repository_name = "" # TODO: FILL ME IN + + # Optional Variables + rds_backup_retention_period = 10 + rds_deletion_protection = true + rds_instance_class = "db.t3.micro" + rds_multi_az = false + container_web_cpu = 256 + container_web_memory = 1024 + container_count = 1 + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-Res-FIPS-2023-04" +} + +output "cluster_id" { + description = "The ID of the ECS cluster" + value = module.ecs_deployment.cluster_id +} + +output "cloudwatch_log_group_name" { + description = "The name of the cloudwatch log group for the web service task" + value = module.ecs_deployment.cloudwatch_log_group_name +} + +output "ecr_repository_name" { + description = "The name of the ECR repository" + value = module.ecs_deployment.ecr_repository_name +} + +output "public_ip" { + description = "The public IP address of the load balancer for the web service" + value = module.ecs_deployment.public_ip +} + +output "web_service_name" { + description = "The name of the ECS container running the web 
service" + value = module.ecs_deployment.web_service_name +} + +output "web_network_configuration_security_group" { + description = "The security groups used by the ECS web task" + value = tolist(module.ecs_deployment.web_network_configuration_security_groups)[0] + +} + +output "web_network_configuration_subnet" { + description = "The ID of one the subnets used by the web task" + value = tolist(module.ecs_deployment.web_network_configuration_subnets)[0] +} + +output "web_task_definition_arn" { + description = "The ARN of the ECS web service task definition" + value = module.ecs_deployment.web_task_definition_arn +} diff --git a/terraform/modules/ecs_deployment/alb.tf b/terraform/modules/ecs_deployment/alb.tf new file mode 100644 index 0000000..23ea78a --- /dev/null +++ b/terraform/modules/ecs_deployment/alb.tf @@ -0,0 +1,52 @@ +resource "aws_lb" "alb" { + name = var.environment_name + internal = false + load_balancer_type = "application" + security_groups = [aws_security_group.load_balancer.id] + subnets = data.aws_subnets.subnets.ids +} + + +resource "aws_lb_target_group" "target_group" { + name = var.environment_name + port = 8080 + protocol = "HTTP" + vpc_id = data.aws_vpc.vpc.id + target_type = "ip" + health_check { + path = "/health-check/" + } + lifecycle { + create_before_destroy = true + } +} + + +resource "aws_lb_listener" "http_redirect" { + load_balancer_arn = aws_lb.alb.arn + port = "80" + protocol = "HTTP" + + default_action { + type = "redirect" + + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +resource "aws_lb_listener" "https" { + load_balancer_arn = aws_lb.alb.arn + port = "443" + protocol = "HTTPS" + ssl_policy = var.ssl_policy + certificate_arn = var.certificate_manager_arn + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.target_group.arn + } +} \ No newline at end of file diff --git a/terraform/modules/ecs_deployment/ecs.tf b/terraform/modules/ecs_deployment/ecs.tf new 
file mode 100644 index 0000000..653949c --- /dev/null +++ b/terraform/modules/ecs_deployment/ecs.tf @@ -0,0 +1,79 @@ +resource "aws_ecs_cluster" "cluster" { + name = "${local.app_env_name}" + + setting { + name = "containerInsights" + value = "enabled" + } +} + +resource "aws_ecs_cluster_capacity_providers" "fargate_provider" { + cluster_name = aws_ecs_cluster.cluster.name + + capacity_providers = ["FARGATE"] +} + +resource "aws_ecs_task_definition" "web" { + family = "${local.app_env_name}-web" + + container_definitions = jsonencode([ + { + name = "${local.app_env_name}-web" + image = local.ecr_image_uri + essential = true + portMappings = [ + { + containerPort = 8080 + hostPort = 8080 + } + ], + logConfiguration = { + logDriver = "awslogs", + options = { + awslogs-group = aws_cloudwatch_log_group.web_log_group.name, + awslogs-region = data.aws_region.current.name, + awslogs-stream-prefix = "ecs" + } + } + secrets = local.ecs_secrets + } + ]) + + requires_compatibilities = ["FARGATE"] + cpu = var.container_web_cpu + memory = var.container_web_memory + execution_role_arn = aws_iam_role.ecs_execution_role.arn + network_mode = "awsvpc" + task_role_arn = aws_iam_role.ecs_task_role.arn +} + +resource "aws_ecs_service" "web" { + name = "${local.app_env_name}-web" + cluster = aws_ecs_cluster.cluster.id + task_definition = aws_ecs_task_definition.web.arn + desired_count = var.container_count + launch_type = "FARGATE" + enable_execute_command = true + + deployment_circuit_breaker { + enable = true + rollback = true + } + + load_balancer { + target_group_arn = aws_lb_target_group.target_group.arn + container_name = "${local.app_env_name}-web" + container_port = 8080 + } + + network_configuration { + subnets = data.aws_subnets.subnets.ids + security_groups = [aws_security_group.web.id] + assign_public_ip = true + } +} + +resource "aws_cloudwatch_log_group" "web_log_group" { + name = "${local.app_env_name}-web" + retention_in_days = 90 +} \ No newline at end of file diff 
--git a/terraform/modules/ecs_deployment/iam.tf b/terraform/modules/ecs_deployment/iam.tf new file mode 100644 index 0000000..f7e975b --- /dev/null +++ b/terraform/modules/ecs_deployment/iam.tf @@ -0,0 +1,147 @@ +data "aws_iam_policy_document" "ecs_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ecs-tasks.amazonaws.com"] + } + } +} + + +data "aws_iam_policy_document" "ecs_execution_role_policy" { + statement { + effect = "Allow" + actions = [ + "ecr:GetAuthorizationToken" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage" + ] + resources = [ + "arn:aws:ecr:*:*:repository/${var.ecr_repository_name}" + ] + } + + statement { + effect = "Allow" + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + resources = [ + "${aws_cloudwatch_log_group.web_log_group.arn}:*" + ] + } + + statement { + effect = "Allow" + actions = [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ] + resources = [ + aws_secretsmanager_secret.web_infrastructure.arn, + data.aws_secretsmanager_secret.web_config.arn + ] + } + + statement { + effect = "Allow" + actions = [ + "kms:Decrypt" + ] + resources = [ + "arn:aws:kms:*:*:aws/secretsmanager" + ] + } +} + + +data "aws_iam_policy_document" "ecs_task_role_policy" { + statement { + effect = "Allow" + actions = [ + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "ses:GetSendQuota" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "ses:SendBulkTemplatedEmail", + "ses:SendEmail", + "ses:SendRawEmail", + "ses:SendTemplatedEmail" + ] + resources = ["*"] + condition { + test = "StringLike" + variable = "ses:FromAddress" + values = 
[var.ses_from_email] + } + } + /* NOTE(review): s3:* on the bucket is broad — consider narrowing to the object/bucket actions the application actually uses */ + statement { + effect = "Allow" + actions = [ + "s3:*" + ] + resources = [ + format("arn:aws:s3:::%s", aws_s3_bucket.bucket.id), + format("arn:aws:s3:::%s/*", aws_s3_bucket.bucket.id) + ] + } + /* NOTE(review): same questionable KMS resource pattern as in the execution role policy — confirm format */ + statement { + effect = "Allow" + actions = [ + "kms:Decrypt" + ] + resources = [ + "arn:aws:kms:*:*:aws/secretsmanager" + ] + } +} + /* Execution role: assumed by the ECS agent to pull the image, write logs and read secrets */ +resource "aws_iam_role" "ecs_execution_role" { + name = "${local.app_env_name}-ecs-execution-role" + assume_role_policy = data.aws_iam_policy_document.ecs_assume_role_policy.json +} + +resource "aws_iam_role_policy" "ecs_execution_role_policy" { + name = "ecs-execution-role-policy" + role = aws_iam_role.ecs_execution_role.id + policy = data.aws_iam_policy_document.ecs_execution_role_policy.json +} + /* Task role: assumed by the application container at runtime */ +resource "aws_iam_role" "ecs_task_role" { + name = "${local.app_env_name}-ecs-task-role" + assume_role_policy = data.aws_iam_policy_document.ecs_assume_role_policy.json +} + +resource "aws_iam_role_policy" "ecs_task_role_policy" { + name = "ecs-task-role-policy" + role = aws_iam_role.ecs_task_role.id + policy = data.aws_iam_policy_document.ecs_task_role_policy.json +} diff --git a/terraform/modules/ecs_deployment/locals.tf b/terraform/modules/ecs_deployment/locals.tf new file mode 100644 index 0000000..81d49a1 --- /dev/null +++ b/terraform/modules/ecs_deployment/locals.tf @@ -0,0 +1,25 @@ +data "aws_caller_identity" "current" {} + /* Derived naming, the ECR image URI (tagged with the environment name), and the secrets lists injected into the container definition. nonsensitive() is required so Terraform will iterate the secret's keys at plan time; only key NAMES are exposed in state/plan, values are resolved by ECS at task start via valueFrom */ +locals { + app_env_name = "${var.application_name}-${var.environment_name}" + + ecr_image_uri = "${data.aws_caller_identity.current.account_id}.dkr.ecr.${data.aws_region.current.name}.amazonaws.com/${var.ecr_repository_name}:${var.environment_name}" + + ecs_infrastructure_secrets = [ + for setting in keys(jsondecode(nonsensitive(aws_secretsmanager_secret_version.web_infrastructure.secret_string))) : + { + name : setting + valueFrom : format("%s:%s::", aws_secretsmanager_secret.web_infrastructure.arn, setting) + } + ] + + ecs_config_secrets = [ + for setting in 
keys(jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.web_config.secret_string))) : + { + name : setting + valueFrom : format("%s:%s::", data.aws_secretsmanager_secret.web_config.arn, setting) + } + ] + /* Combined list consumed by the container definition in ecs.tf */ + ecs_secrets = concat(local.ecs_infrastructure_secrets, local.ecs_config_secrets) +} \ No newline at end of file diff --git a/terraform/modules/ecs_deployment/outputs.tf b/terraform/modules/ecs_deployment/outputs.tf new file mode 100644 index 0000000..8d77caa --- /dev/null +++ b/terraform/modules/ecs_deployment/outputs.tf @@ -0,0 +1,44 @@ +output "cluster_id" { + description = "The ID of the ECS cluster" + value = aws_ecs_cluster.cluster.id +} + +output "cloudwatch_log_group_name" { + description = "The name of the cloudwatch log group for the web service task" + value = aws_cloudwatch_log_group.web_log_group.name +} + +output "ecr_image_uri" { + description = "The full URI of the ECR image" + value = local.ecr_image_uri +} + +output "ecr_repository_name" { + description = "The name of the ECR repository" + value = var.ecr_repository_name +} + /* The value is the ALB's DNS name — an ALB has no stable public IP. Output name kept as "public_ip" for backward compatibility with existing consumers */ +output "public_ip" { + description = "The DNS name of the load balancer for the web service" + value = aws_lb.alb.dns_name +} + +output "web_service_name" { + description = "The name of the ECS web service. This is also the container name."
+ value = aws_ecs_service.web.name +} + +output "web_network_configuration_security_groups" { + description = "The security groups used by the ECS web task" + value = aws_ecs_service.web.network_configuration[0].security_groups +} + +output "web_network_configuration_subnets" { + description = "The ID of the subnets used by the web task" + value = aws_ecs_service.web.network_configuration[0].subnets +} + +output "web_task_definition_arn" { + description = "The ARN of the ECS web service task definition" + value = aws_ecs_task_definition.web.arn +} \ No newline at end of file diff --git a/terraform/modules/ecs_deployment/rds.tf b/terraform/modules/ecs_deployment/rds.tf new file mode 100644 index 0000000..bf5a66a --- /dev/null +++ b/terraform/modules/ecs_deployment/rds.tf @@ -0,0 +1,31 @@ /* Master DB password; surfaced to the app only through the Secrets Manager secret built in secrets_manager.tf. special = false avoids characters that would need URL-encoding in the connection string */ +resource "random_password" "db_password" { + length = 50 + special = false +} + + /* Postgres instance, encrypted at rest, reachable only from the web security group (see security_groups.tf). NOTE(review): db_name resolves to "<application_name>_db" — confirm it matches the database path used in the DATABASE_URL assembled in secrets_manager.tf */ +resource "aws_db_instance" "database" { + allocated_storage = 20 + allow_major_version_upgrade = true + apply_immediately = true + backup_retention_period = var.rds_backup_retention_period + db_name = format("%s_db", replace(var.application_name, "-", "_")) + deletion_protection = var.rds_deletion_protection + engine = "postgres" + engine_version = var.rds_engine_version + identifier = "${local.app_env_name}-db" + instance_class = var.rds_instance_class + multi_az = var.rds_multi_az + password = random_password.db_password.result + storage_encrypted = true + storage_type = "gp2" + username = "dbuser" + vpc_security_group_ids = [aws_security_group.database.id] + db_subnet_group_name = aws_db_subnet_group.database.name +} + + /* NOTE(review): uses every subnet in the VPC — confirm whether the DB should be restricted to private subnets */ +resource "aws_db_subnet_group" "database" { + name = "${local.app_env_name}-database-subnets" + subnet_ids = data.aws_subnets.subnets.ids +} diff --git a/terraform/modules/ecs_deployment/s3.tf b/terraform/modules/ecs_deployment/s3.tf new file mode 100644 index 0000000..a983340 --- /dev/null +++ b/terraform/modules/ecs_deployment/s3.tf @@ -0,0 +1,36 @@ +resource "aws_s3_bucket" 
"bucket" { + bucket = "${var.s3_bucket_prefix}-${var.environment_name}" + + tags = { + Environment = var.environment_name + } +} + + +resource "aws_s3_bucket_public_access_block" "bucket" { + bucket = aws_s3_bucket.bucket.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + + +resource "aws_s3_bucket_versioning" "bucket" { + bucket = aws_s3_bucket.bucket.id + versioning_configuration { + status = "Enabled" + } +} + + +resource "aws_s3_bucket_server_side_encryption_configuration" "bucket" { + bucket = aws_s3_bucket.bucket.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "aws:kms" + } + } +} diff --git a/terraform/modules/ecs_deployment/secrets_manager.tf b/terraform/modules/ecs_deployment/secrets_manager.tf new file mode 100644 index 0000000..5704235 --- /dev/null +++ b/terraform/modules/ecs_deployment/secrets_manager.tf @@ -0,0 +1,33 @@ +resource "aws_secretsmanager_secret" "web_infrastructure" { + name = "${local.app_env_name}-web-infrastructure" +} + +resource "aws_secretsmanager_secret_version" "web_infrastructure" { + secret_id = aws_secretsmanager_secret.web_infrastructure.id + secret_string = jsonencode({ + AWS_STORAGE_BUCKET_NAME = aws_s3_bucket.bucket.id + DATABASE_URL = format( + "postgres://dbuser:%s@%s:5432/database?sslmode=require", + random_password.db_password.result, + aws_db_instance.database.address, + ) + DEFAULT_FROM_EMAIL = var.ses_from_email + SECRET_KEY = random_password.app_secret_key.result + }) +} + + +data "aws_secretsmanager_secret" "web_config" { + name = var.web_config_secret_name +} + + +data "aws_secretsmanager_secret_version" "web_config" { + secret_id = data.aws_secretsmanager_secret.web_config.id +} + + +resource "random_password" "app_secret_key" { + length = 32 + special = false +} diff --git a/terraform/modules/ecs_deployment/security_groups.tf b/terraform/modules/ecs_deployment/security_groups.tf new file mode 100644 index 
0000000..318d79a --- /dev/null +++ b/terraform/modules/ecs_deployment/security_groups.tf @@ -0,0 +1,88 @@ /* Traffic chain: internet -> ALB (80/443) -> web tasks (8080) -> database (5432). create_before_destroy on each SG avoids name-conflict failures when a group must be replaced */ +resource "aws_security_group" "load_balancer" { + name = "${local.app_env_name}-lb" + vpc_id = var.vpc_id + + ingress { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "${var.application_name} ${var.environment_name} load balancer" + } + + lifecycle { + create_before_destroy = true + } +} + /* Web tasks accept traffic only from the load balancer, on the container port */ +resource "aws_security_group" "web" { + name = "${local.app_env_name}-web" + vpc_id = var.vpc_id + + ingress { + from_port = 8080 + to_port = 8080 + protocol = "tcp" + security_groups = [aws_security_group.load_balancer.id] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "${var.application_name} ${var.environment_name} web" + } + + lifecycle { + create_before_destroy = true + } +} + /* Database accepts connections only from the web security group. NOTE(review): egress is wide open — RDS rarely needs outbound; consider removing */ +resource "aws_security_group" "database" { + name = "${local.app_env_name}-db" + vpc_id = var.vpc_id + + ingress { + from_port = 5432 + to_port = 5432 + protocol = "tcp" + security_groups = [aws_security_group.web.id] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "${var.application_name} ${var.environment_name} database" + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/terraform/modules/ecs_deployment/variables.tf b/terraform/modules/ecs_deployment/variables.tf new file mode 100644 index 0000000..be4b90d --- /dev/null +++ b/terraform/modules/ecs_deployment/variables.tf @@ -0,0 +1,77 @@ +# Required Variables +variable "application_name" { + type = string +} + +variable "environment_name" { + type = string +} + +variable 
"vpc_id" { + type = string +} + +variable "web_config_secret_name" { + type = string # key for secrets_manager secrets that are not terraform managed +} + +variable "s3_bucket_prefix" { + type = string +} + +variable "rds_engine_version" { + type = string +} + +variable "ses_from_email" { + type = string +} + +variable "certificate_manager_arn" { + type = string +} + +variable "ecr_repository_name" { + type = string +} + +# Optional Variables +variable "rds_backup_retention_period" { + type = number + default = 30 +} + +variable "rds_deletion_protection" { + type = bool + default = true +} + +variable "rds_instance_class" { + type = string + default = "db.t3.micro" +} + +variable "rds_multi_az" { + type = bool + default = false +} + +variable "container_web_cpu" { + type = number + default = 256 +} + +variable "container_web_memory" { + type = number + default = 1024 +} + +variable "container_count" { + type = number + default = 1 +} + +variable "ssl_policy" { + type = string + default = "ELBSecurityPolicy-TLS13-1-2-Res-FIPS-2023-04" +} diff --git a/terraform/modules/ecs_deployment/vpc.tf b/terraform/modules/ecs_deployment/vpc.tf new file mode 100644 index 0000000..1875ec8 --- /dev/null +++ b/terraform/modules/ecs_deployment/vpc.tf @@ -0,0 +1,12 @@ +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "subnets" { + filter { + name = "vpc-id" + values = [data.aws_vpc.vpc.id] + } +} + +data "aws_region" "current" {}