diff --git a/.tool-versions b/.tool-versions index 32db55a..431bf79 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,6 +1,5 @@ # This file is for you! Please, updated to the versions agreed by your team. -terraform 1.7.0 pre-commit 3.6.0 # ============================================================================== diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000..f0d9138 --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,60 @@ +### Terraform ### + +# Transient backends +components/**/backend_tfscaffold.tf + +# Compiled files +**/*.tfstate +**/*.tfplan +**/*.tfstate.backup +**/.terraform +**/.terraform.lock.hcl +**/.terraform/* +**/build/* +**/work/* +**/*tfstate.lock.info + +# Scaffold Plugin Cache +plugin-cache/* + +# PyCache +**/__pycache__ + +### OSX ### +**/.DS_Store +**/.AppleDouble +**/.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +*.swp +.nyc_output + +# VS Code +.vscode + +# IntelliJ Idea +.idea +**/*.iml + +# js +node_modules diff --git a/terraform/bin/terraform.sh b/terraform/bin/terraform.sh new file mode 100755 index 0000000..22143d1 --- /dev/null +++ b/terraform/bin/terraform.sh @@ -0,0 +1,804 @@ +#!/bin/bash +# Terraform Scaffold +# +# A wrapper for running terraform projects +# - handles remote state +# - uses consistent .tfvars files for each environment + +## +# Set Script Version +## +readonly script_ver="1.8.0"; + +## +# Standardised failure function +## +function error_and_die { + echo -e "ERROR: ${1}" >&2; + exit 1; +}; + +## +# Print Script Version +## +function version() { + echo "${script_ver}"; +} + +## +# Print Usage Text +## +function usage() { + +cat < + +action: + - Special actions: + * plan / plan-destroy + * apply / destroy + * graph + * taint / untaint + * shell + - Generic actions: + * See https://www.terraform.io/docs/commands/ + +bucket_prefix (optional): + Defaults to: "\${project_name}-tfscaffold" + - myproject-terraform + - terraform-yourproject + - my-first-tfscaffold-project + +build_id (optional): + - testing + - \$BUILD_ID (jenkins) + +component_name: + - the name of the terraform component module in the components directory + +environment: + - dev + - test + - prod + - management + +group: + - dev + - live + - mytestgroup + +project: + - The name of the project being deployed + +region (optional): + Defaults to value of \$AWS_DEFAULT_REGION + - the AWS region name unique to all components and terraform processes + +detailed-exitcode (optional): + When not provided, false. + Changes the plan operation to exit 0 only when there are no changes. + Will be ignored for actions other than plan. + +no-color (optional): + Append -no-color to all terraform calls + +compact-warnings (optional): + Append -compact-warnings to all terraform calls + +lockfile: + Append -lockfile=MODE to calls to terraform init + +additional arguments: + Any arguments provided after "--" will be passed directly to terraform as its own arguments +EOF +}; + +## +# Test for GNU getopt +## +getopt_out=$(getopt -T) +if (( $? != 4 )) && [[ -n $getopt_out ]]; then + error_and_die "Non GNU getopt detected. 
If you're using a Mac then try \"brew install gnu-getopt\""; +fi + +## +# Execute getopt and process script arguments +## +readonly raw_arguments="${*}"; +ARGS=$(getopt \ + -o dhnvwa:b:c:e:g:i:l:p:r: \ + -l "help,version,bootstrap,action:,bucket-prefix:,build-id:,component:,environment:,group:,project:,region:,lockfile:,detailed-exitcode,no-color,compact-warnings" \ + -n "${0}" \ + -- \ + "$@"); + +#Bad arguments +if [ $? -ne 0 ]; then + usage; + error_and_die "command line argument parse failure"; +fi; + +eval set -- "${ARGS}"; + +declare bootstrap="false"; +declare component_arg; +declare region_arg; +declare environment_arg; +declare group; +declare action; +declare bucket_prefix; +declare build_id; +declare project; +declare detailed_exitcode; +declare no_color; +declare compact_warnings; +declare lockfile; + +while true; do + case "${1}" in + -h|--help) + usage; + exit 0; + ;; + -v|--version) + version; + exit 0; + ;; + -c|--component) + shift; + if [ -n "${1}" ]; then + component_arg="${1}"; + shift; + fi; + ;; + -r|--region) + shift; + if [ -n "${1}" ]; then + region_arg="${1}"; + shift; + fi; + ;; + -e|--environment) + shift; + if [ -n "${1}" ]; then + environment_arg="${1}"; + shift; + fi; + ;; + -g|--group) + shift; + if [ -n "${1}" ]; then + group="${1}"; + shift; + fi; + ;; + -a|--action) + shift; + if [ -n "${1}" ]; then + action="${1}"; + shift; + fi; + ;; + -b|--bucket-prefix) + shift; + if [ -n "${1}" ]; then + bucket_prefix="${1}"; + shift; + fi; + ;; + -i|--build-id) + shift; + if [ -n "${1}" ]; then + build_id="${1}"; + shift; + fi; + ;; + -l|--lockfile) + shift; + if [ -n "${1}" ]; then + lockfile="-lockfile=${1}"; + shift; + fi; + ;; + -p|--project) + shift; + if [ -n "${1}" ]; then + project="${1}"; + shift; + fi; + ;; + --bootstrap) + shift; + bootstrap="true"; + ;; + -d|--detailed-exitcode) + shift; + detailed_exitcode="true"; + ;; + -n|--no-color) + shift; + no_color="-no-color"; + ;; + -w|--compact-warnings) + shift; + compact_warnings="-compact-warnings"; + ;; + --) + shift; + break; + ;; + esac; +done; + +declare extra_args="${@} ${no_color} ${compact_warnings}"; # All arguments supplied after "--" + +## +# Script Set-Up +## + +# Determine where I am and from that derive basepath and project name +script_path="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"; +base_path="${script_path%%\/bin}"; +project_name_default="${base_path##*\/}"; + +status=0; + +echo "Args ${raw_arguments}"; + +# Ensure script console output is separated by blank line at top and bottom to improve readability +trap echo EXIT; +echo; + +## +# Munge Params +## + +# Set Region from args or environment. Exit if unset. +readonly region="${region_arg:-${AWS_DEFAULT_REGION}}"; +[ -n "${region}" ] \ + || error_and_die "No AWS region specified. No -r/--region argument supplied and AWS_DEFAULT_REGION undefined"; + +[ -n "${project}" ] \ + || error_and_die "Required argument -p/--project not specified"; + +# Bootstrapping is special +if [ "${bootstrap}" == "true" ]; then + [ -n "${component_arg}" ] \ + && error_and_die "The --bootstrap parameter and the -c/--component parameter are mutually exclusive"; + [ -n "${build_id}" ] \ + && error_and_die "The --bootstrap parameter and the -i/--build-id parameter are mutually exclusive. 
We do not currently support plan files for bootstrap"; + [ -n "${environment_arg}" ] && readonly environment="${environment_arg}"; +else + # Validate component to work with + [ -n "${component_arg}" ] \ + || error_and_die "Required argument missing: -c/--component"; + readonly component="${component_arg}"; + + # Validate environment to work with + [ -n "${environment_arg}" ] \ + || error_and_die "Required argument missing: -e/--environment"; + readonly environment="${environment_arg}"; +fi; + +[ -n "${action}" ] \ + || error_and_die "Required argument missing: -a/--action"; + +# Validate AWS Credentials Available +iam_iron_man="$(aws sts get-caller-identity --query 'Arn' --output text)"; +if [ -n "${iam_iron_man}" ]; then + echo -e "AWS Credentials Found. Using ARN '${iam_iron_man}'"; +else + error_and_die "No AWS Credentials Found. \"aws sts get-caller-identity --query 'Arn' --output text\" responded with ARN '${iam_iron_man}'"; +fi; + +# Query canonical AWS Account ID +aws_account_id="$(aws sts get-caller-identity --query 'Account' --output text)"; +if [ -n "${aws_account_id}" ]; then + echo -e "AWS Account ID: ${aws_account_id}"; +else + error_and_die "Couldn't determine AWS Account ID. \"aws sts get-caller-identity --query 'Account' --output text\" provided no output"; +fi; + +# Validate S3 bucket. Set default if undefined +if [ -n "${bucket_prefix}" ]; then + readonly bucket="${bucket_prefix}-${aws_account_id}-${region}" + echo -e "Using S3 bucket s3://${bucket}"; +else + readonly bucket="${project}-tfscaffold-${aws_account_id}-${region}"; + echo -e "No bucket prefix specified. Using S3 bucket s3://${bucket}"; +fi; + +declare component_path; +if [ "${bootstrap}" == "true" ]; then + component_path="${base_path}/bootstrap"; +else + component_path="${base_path}/components/${component}"; +fi; + +# Get the absolute path to the component +if [[ "${component_path}" != /* ]]; then + component_path="$(cd "$(pwd)/${component_path}" && pwd)"; +else + component_path="$(cd "${component_path}" && pwd)"; +fi; + +[ -d "${component_path}" ] || error_and_die "Component path ${component_path} does not exist"; + +## Debug +#echo $component_path; + +## +# Begin parameter-dependent logic +## + +case "${action}" in + apply) + refresh="-refresh=true"; + ;; + destroy) + destroy='-destroy'; + refresh="-refresh=true"; + ;; + plan) + refresh="-refresh=true"; + ;; + plan-destroy) + action="plan"; + destroy="-destroy"; + refresh="-refresh=true"; + ;; + *) + ;; +esac; + +# Tell terraform to moderate its output to be a little +# more friendly to automation wrappers +# Value is irrelavant, just needs to be non-null +export TF_IN_AUTOMATION="true"; + +for rc_path in "${base_path}" "${base_path}/etc" "${component_path}"; do + if [ -f "${rc_path}/.terraformrc" ]; then + echo "Found .terraformrc at ${rc_path}/.terraformrc. 
Overriding."; + export TF_CLI_CONFIG_FILE="${rc_path}/.terraformrc"; + fi; +done; + +# Configure the plugin-cache location so plugins are not +# downloaded to individual components +declare default_plugin_cache_dir="$(pwd)/plugin-cache"; +export TF_PLUGIN_CACHE_DIR="${TF_PLUGIN_CACHE_DIR:-${default_plugin_cache_dir}}" +mkdir -p "${TF_PLUGIN_CACHE_DIR}" \ + || error_and_die "Failed to created the plugin-cache directory (${TF_PLUGIN_CACHE_DIR})"; +[ -w "${TF_PLUGIN_CACHE_DIR}" ] \ + || error_and_die "plugin-cache directory (${TF_PLUGIN_CACHE_DIR}) not writable"; + +# Clear cache, safe enough as we enforce plugin cache +rm -rf ${component_path}/.terraform; + +# Run global pre.sh +if [ -f "pre.sh" ]; then + source pre.sh "${region}" "${environment}" "${action}" \ + || error_and_die "Global pre script execution failed with exit code ${?}"; +fi; + +# Make sure we're running in the component directory +pushd "${component_path}"; +readonly component_name=$(basename ${component_path}); + +# Check for presence of tfenv (https://github.com/kamatama41/tfenv) +# and a .terraform-version file. If both present, ensure required +# version of terraform for this component is installed automagically. +tfenv_bin="$(which tfenv 2>/dev/null)"; +if [[ -n "${tfenv_bin}" && -x "${tfenv_bin}" && -f .terraform-version ]]; then + ${tfenv_bin} install; +fi; + +# Regardless of bootstrapping or not, we'll be using this string. +# If bootstrapping, we will fill it with variables, +# if not we will fill it with variable file parameters +declare tf_var_params; + +if [ "${bootstrap}" == "true" ]; then + if [ "${action}" == "destroy" ]; then + error_and_die "You cannot destroy a bootstrap bucket using tfscaffold, it's just too dangerous. If you're absolutely certain that you want to delete the bucket and all contents, including any possible state files environments and components within this project, then you will need to do it from the AWS Console. Note you cannot do this from the CLI because the bootstrap bucket is versioned, and even the --force CLI parameter will not empty the bucket of versions"; + fi; + + # Bootstrap requires this parameter as explicit as it is constructed here + # for multiple uses, so we cannot just depend on it being set in tfvars + tf_var_params+=" -var bucket_name=${bucket}"; +fi; + +# Run pre.sh +if [ -f "pre.sh" ]; then + source pre.sh "${region}" "${environment}" "${action}" \ + || error_and_die "Component pre script execution failed with exit code ${?}"; +fi; + +# Pull down secret TFVAR file from S3 +# Anti-pattern and security warning: This secrets mechanism provides very little additional security. +# It permits you to inject secrets directly into terraform without storing them in source control or unencrypted in S3. +# Secrets will still be stored in all copies of your state file - which will be stored on disk wherever this script is run and in S3. +# This script does not currently support encryption of state files. +# Use this feature only if you're sure it's the right pattern for your use case. +declare -a secrets=(); +readonly secrets_file_name="secret.tfvars.enc"; +readonly secrets_file_path="build/${secrets_file_name}"; +aws s3 ls s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${secrets_file_name} >/dev/null 2>&1; +if [ $? -eq 0 ]; then + mkdir -p build; + aws s3 cp s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${secrets_file_name} ${secrets_file_path} \ + || error_and_die "S3 secrets file is present, but inaccessible. 
Ensure you have permission to read s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${secrets_file_name}"; + if [ -f "${secrets_file_path}" ]; then + secrets=($(aws kms decrypt --ciphertext-blob fileb://${secrets_file_path} --output text --query Plaintext | base64 --decode)); + fi; +fi; + +if [ -n "${secrets[0]}" ]; then + secret_regex='^[A-Za-z0-9_-]+=.+$'; + secret_count=1; + for secret_line in "${secrets[@]}"; do + if [[ "${secret_line}" =~ ${secret_regex} ]]; then + var_key="${secret_line%=*}"; + var_val="${secret_line##*=}"; + export TF_VAR_${var_key}="${var_val}"; + ((secret_count++)); + else + echo "Malformed secret on line ${secret_count} - ignoring"; + fi; + done; +fi; + +# Pull down additional dynamic plaintext tfvars file from S3 +# Anti-pattern warning: Your variables should almost always be in source control. +# There are a very few use cases where you need constant variability in input variables, +# and even in those cases you should probably pass additional -var parameters to this script +# from your automation mechanism. +# Use this feature only if you're sure it's the right pattern for your use case. +readonly dynamic_file_name="dynamic.tfvars"; +readonly dynamic_file_path="build/${dynamic_file_name}"; +aws s3 ls s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${dynamic_file_name} >/dev/null 2>&1; +if [ $? -eq 0 ]; then + aws s3 cp s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${dynamic_file_name} ${dynamic_file_path} \ + || error_and_die "S3 tfvars file is present, but inaccessible. Ensure you have permission to read s3://${bucket}/${project}/${aws_account_id}/${region}/${environment}/${dynamic_file_name}"; +fi; + +# Use versions TFVAR files if exists +readonly versions_file_name="versions_${region}_${environment}.tfvars"; +readonly versions_file_path="${base_path}/etc/${versions_file_name}"; + +# Check for presence of an environment variables file, and use it if readable +if [ -n "${environment}" ]; then + readonly env_file_path="${base_path}/etc/env_${region}_${environment}.tfvars"; +fi; + +# Check for presence of a global variables file, and use it if readable +readonly global_vars_file_name="global.tfvars"; +readonly global_vars_file_path="${base_path}/etc/${global_vars_file_name}"; + +# Check for presence of a region variables file, and use it if readable +readonly region_vars_file_name="${region}.tfvars"; +readonly region_vars_file_path="${base_path}/etc/${region_vars_file_name}"; + +# Check for presence of a group variables file if specified, and use it if readable +if [ -n "${group}" ]; then + readonly group_vars_file_name="group_${group}.tfvars"; + readonly group_vars_file_path="${base_path}/etc/${group_vars_file_name}"; +fi; + +# Collect the paths of the variables files to use +declare -a tf_var_file_paths; + +# Use Global and Region first, to allow potential for terraform to do the +# honourable thing and override global and region settings with environment +# specific ones; however we do not officially support the same variable +# being declared in multiple locations, and we warn when we find any duplicates +[ -f "${global_vars_file_path}" ] && tf_var_file_paths+=("${global_vars_file_path}"); +[ -f "${region_vars_file_path}" ] && tf_var_file_paths+=("${region_vars_file_path}"); + +# If a group has been specified, load the vars for the group. 
If we are to assume +# terraform correctly handles override-ordering (which to be fair we don't hence +# the warning about duplicate variables below) we add this to the list after +# global and region-global variables, but before the environment variables +# so that the environment can explicitly override variables defined in the group. +if [ -n "${group}" ]; then + if [ -f "${group_vars_file_path}" ]; then + tf_var_file_paths+=("${group_vars_file_path}"); + else + echo -e "[WARNING] Group \"${group}\" has been specified, but no group variables file is available at ${group_vars_file_path}"; + fi; +fi; + +# Environment is normally expected, but in bootstrapping it may not be provided +if [ -n "${environment}" ]; then + if [ -f "${env_file_path}" ]; then + tf_var_file_paths+=("${env_file_path}"); + else + echo -e "[WARNING] Environment \"${environment}\" has been specified, but no environment variables file is available at ${env_file_path}"; + fi; +fi; + +# If present and readable, use versions and dynamic variables too +[ -f "${versions_file_path}" ] && tf_var_file_paths+=("${versions_file_path}"); +[ -f "${dynamic_file_path}" ] && tf_var_file_paths+=("${dynamic_file_path}"); + +# Warn on duplication +duplicate_variables="$(cat "${tf_var_file_paths[@]}" | sed -n -e 's/\(^[a-zA-Z0-9_\-]\+\)\s*=.*$/\1/p' | sort | uniq -d)"; +[ -n "${duplicate_variables}" ] \ + && echo -e " +################################################################### +# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING # +################################################################### +The following input variables appear to be duplicated: + +${duplicate_variables} + +This could lead to unexpected behaviour. Overriding of variables +has previously been unpredictable and is not currently supported, +but it may work. + +Recent changes to terraform might give you useful overriding and +map-merging functionality, please use with caution and report back +on your successes & failures. +###################################################################"; + +# Build up the tfvars arguments for terraform command line +for file_path in "${tf_var_file_paths[@]}"; do + tf_var_params+=" -var-file=${file_path}"; +done; + +## +# Start Doing Real Things +## + +# Really Hashicorp? Really?! +# +# In order to work with terraform >=0.9.2 (I say 0.9.2 because 0.9 prior +# to 0.9.2 is barely usable due to key bugs and missing features) +# we now need to do some ugly things to our terraform remote backend configuration. +# The long term hope is that they will fix this, and maybe remove the need for it +# altogether by supporting interpolation in the backend config stanza. +# +# For now we're left with this garbage, and no more support for <0.9.0. +if [ -f backend_tfscaffold.tf ]; then + echo -e "WARNING: backend_tfscaffold.tf exists and will be overwritten!" >&2; +fi; + +declare backend_prefix; +declare backend_filename; + +if [ "${bootstrap}" == "true" ]; then + backend_prefix="${project}/${aws_account_id}/${region}/bootstrap"; + backend_filename="bootstrap.tfstate"; +else + backend_prefix="${project}/${aws_account_id}/${region}/${environment}"; + backend_filename="${component_name}.tfstate"; +fi; + +readonly backend_key="${backend_prefix}/${backend_filename}"; +readonly backend_config="terraform { + backend \"s3\" { + region = \"${region}\" + bucket = \"${bucket}\" + key = \"${backend_key}\" + dynamodb_table = \"${bucket}\" + } +}"; + +# We're now all ready to go. 
All that's left is to: +# * Write the backend config +# * terraform init +# * terraform ${action} +# +# But if we're dealing with the special bootstrap component +# we can't remotely store the backend until we've bootstrapped it +# +# So IF the S3 bucket already exists, we will continue as normal +# because we want to be able to manage changes to an existing +# bootstrap bucket. But if it *doesn't* exist, then we need to be +# able to plan and apply it with a local state, and *then* configure +# the remote state. + +# In default operations we assume we are already bootstrapped +declare bootstrapped="true"; + +# If we are in bootstrap mode, we need to know if we have already bootstrapped +# or we are working with or modifying an existing bootstrap bucket +if [ "${bootstrap}" == "true" ]; then + # For this exist check we could do many things, but we explicitly perform + # an ls against the key we will be working with so as to not require + # permissions to, for example, list all buckets, or the bucket root keyspace + aws s3 ls s3://${bucket}/${backend_prefix}/${backend_filename} >/dev/null 2>&1; + [ $? -eq 0 ] || bootstrapped="false"; +fi; + +if [ "${bootstrapped}" == "true" ]; then + echo -e "${backend_config}" > backend_tfscaffold.tf \ + || error_and_die "Failed to write backend config to $(pwd)/backend_tfscaffold.tf"; + + # Nix the horrible hack on exit + trap "rm -f $(pwd)/backend_tfscaffold.tf" EXIT; + + declare lockfile_or_upgrade; + [ -n "${lockfile}" ] && lockfile_or_upgrade="${lockfile}" || lockfile_or_upgrade='-upgrade'; + + # Configure remote state storage + echo "Setting up S3 remote state from s3://${bucket}/${backend_key}"; + terraform init ${no_color} ${compact_warnings} ${lockfile_or_upgrade} \ + || error_and_die "Terraform init failed"; +else + # We are bootstrapping. Download the providers, skip the backend config.
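+ # Illustrative first-time bootstrap flow, as a sketch (this branch plus the
+ # apply case further down):
+ #   terraform init -backend=false   # local state only; the S3 bucket does not exist yet
+ #   terraform apply ...             # creates the state bucket, KMS key and DynamoDB lock table
+ #   terraform init                  # re-run once backend_tfscaffold.tf is written, migrating state to S3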
+ terraform init \ + -backend=false \ + ${no_color} \ + ${compact_warnings} \ + ${lockfile} \ + || error_and_die "Terraform init failed"; +fi; + +case "${action}" in + 'plan') + if [ -n "${build_id}" ]; then + mkdir -p build; + + plan_file_name="${component_name}_${build_id}.tfplan"; + plan_file_remote_key="${backend_prefix}/plans/${plan_file_name}"; + + out="-out=build/${plan_file_name}"; + fi; + + if [ "${detailed_exitcode}" == "true" ]; then + detailed="-detailed-exitcode"; + fi; + + terraform "${action}" \ + -input=false \ + ${refresh} \ + ${tf_var_params} \ + ${extra_args} \ + ${destroy} \ + ${out} \ + ${detailed} \ + -parallelism=300; + + status="${?}"; + + # Even when detailed exitcode is set, a 1 is still a fail, + # so exit + # (detailed exit codes are 0 and 2) + if [ "${status}" -eq 1 ]; then + error_and_die "Terraform plan failed"; + fi; + + if [ -n "${build_id}" ]; then + aws s3 cp build/${plan_file_name} s3://${bucket}/${plan_file_remote_key} \ + || error_and_die "Plan file upload to S3 failed (s3://${bucket}/${plan_file_remote_key})"; + fi; + + exit ${status}; + ;; + 'graph') + mkdir -p build || error_and_die "Failed to create output directory '$(pwd)/build'"; + terraform graph ${extra_args} -draw-cycles | dot -Tpng > build/${project}-${aws_account_id}-${region}-${environment}.png \ + || error_and_die "Terraform simple graph generation failed"; + terraform graph ${extra_args} -draw-cycles -verbose | dot -Tpng > build/${project}-${aws_account_id}-${region}-${environment}-verbose.png \ + || error_and_die "Terraform verbose graph generation failed"; + exit 0; + ;; + 'apply'|'destroy'|'refresh') + + # Support for terraform <0.10 is now deprecated + if [ "${action}" == "apply" ]; then + echo "Compatibility: Adding to terraform arguments: -auto-approve=true"; + extra_args+=" -auto-approve=true"; + else # action is `destroy` + # Check terraform version - if pre-0.15, need to add `-force`; 0.15 and above instead use `-auto-approve` + if [ $(terraform version | head -n1 | cut -d" " -f2 | cut -d"." -f1) == "v0" ] && [ $(terraform version | head -n1 | cut -d" " -f2 | cut -d"." -f2) -lt 15 ]; then + echo "Compatibility: Adding to terraform arguments: -force"; + force='-force'; + elif [ "${action}" != "refresh" ]; then + extra_args+=" -auto-approve"; + fi; + fi; + + if [ -n "${build_id}" ]; then + mkdir -p build; + plan_file_name="${component_name}_${build_id}.tfplan"; + plan_file_remote_key="${backend_prefix}/plans/${plan_file_name}"; + + aws s3 cp s3://${bucket}/${plan_file_remote_key} build/${plan_file_name} \ + || error_and_die "Plan file download from S3 failed (s3://${bucket}/${plan_file_remote_key})"; + + apply_plan="build/${plan_file_name}"; + + terraform "${action}" \ + -input=false \ + ${refresh} \ + -parallelism=300 \ + ${extra_args} \ + ${force} \ + ${apply_plan}; + exit_code=$?; + else + terraform "${action}" \ + -input=false \ + ${refresh} \ + ${tf_var_params} \ + -parallelism=300 \ + ${extra_args} \ + ${force}; + exit_code=$?; + + if [ "${bootstrapped}" == "false" ]; then + # If we are here, and we are in bootstrap mode, and not already bootstrapped, + # Then we have just bootstrapped for the first time! Congratulations. 
+ # Now we need to copy our state file into the bootstrap bucket + echo -e "${backend_config}" > backend_tfscaffold.tf \ + || error_and_die "Failed to write backend config to $(pwd)/backend_tfscaffold.tf"; + + # Nix the horrible hack on exit + trap "rm -f $(pwd)/backend_tfscaffold.tf" EXIT; + + # Push Terraform Remote State to S3 + # TODO: Add -upgrade to init when we drop support for <0.10 + echo "yes" | terraform init ${lockfile} || error_and_die "Terraform init failed"; + + # Hard cleanup + rm -f backend_tfscaffold.tf; + rm -f terraform.tfstate # Prime not the backup + rm -rf .terraform; + + # This doesn't mean anything here, we're just celebrating! + bootstrapped="true"; + fi; + + fi; + + if [ ${exit_code} -ne 0 ]; then + error_and_die "Terraform ${action} failed with exit code ${exit_code}"; + fi; + + if [ -f "post.sh" ]; then + source post.sh "${region}" "${environment}" "${action}" \ + || error_and_die "Component post script execution failed with exit code ${?}"; + fi; + ;; + '*taint') + terraform "${action}" ${extra_args} || error_and_die "Terraform ${action} failed."; + ;; + 'import') + terraform "${action}" ${tf_var_params} ${extra_args} || error_and_die "Terraform ${action} failed."; + ;; + 'shell') + echo -e "Here's a shell for the ${component} component.\nIf you want to run terraform actions specific to the ${environment}, pass the following options:\n\n${tf_var_params} ${extra_args}\n\n'exit 0' / 'Ctrl-D' to continue, other exit codes will abort tfscaffold with the same code."; + bash -l || exit "${?}"; + ;; + *) + echo -e "Generic action case invoked. Only the additional arguments will be passed to terraform, you break it you fix it:"; + echo -e "\tterraform ${action} ${extra_args}"; + terraform "${action}" ${extra_args} \ + || error_and_die "Terraform ${action} failed."; + ;; +esac; + +popd + +if [ -f "post.sh" ]; then + source post.sh "${region}" "${environment}" "${action}" \ + || error_and_die "Global post script execution failed with exit code ${?}"; +fi; + +exit 0; diff --git a/terraform/bootstrap/.terraform-version b/terraform/bootstrap/.terraform-version new file mode 100644 index 0000000..80e78df --- /dev/null +++ b/terraform/bootstrap/.terraform-version @@ -0,0 +1 @@ +1.3.5 diff --git a/terraform/bootstrap/data_iam_policy_document_bucket.tf b/terraform/bootstrap/data_iam_policy_document_bucket.tf new file mode 100644 index 0000000..dd231f5 --- /dev/null +++ b/terraform/bootstrap/data_iam_policy_document_bucket.tf @@ -0,0 +1,68 @@ +data "aws_iam_policy_document" "bucket" { + statement { + sid = "DontAllowNonSecureConnection" + effect = "Deny" + + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.bucket.arn, + "${aws_s3_bucket.bucket.arn}/*", + ] + + principals { + type = "AWS" + + identifiers = [ + "*", + ] + } + + condition { + test = "Bool" + variable = "aws:SecureTransport" + + values = [ + "false", + ] + } + } + + statement { + sid = "AllowManagedAccountsToList" + effect = "Allow" + + actions = [ + "s3:ListBucket", + ] + + resources = [ + aws_s3_bucket.bucket.arn, + ] + + principals { + type = "AWS" + identifiers = local.ro_principals + } + } + + statement { + sid = "AllowManagedAccountsToGet" + effect = "Allow" + + actions = [ + "s3:GetObject", + ] + + resources = [ + "${aws_s3_bucket.bucket.arn}/*", + ] + + principals { + type = "AWS" + identifiers = local.ro_principals + } + } +} diff --git a/terraform/bootstrap/data_iam_policy_document_kms_key_s3.tf b/terraform/bootstrap/data_iam_policy_document_kms_key_s3.tf new file mode 100644 index 
0000000..9741a08 --- /dev/null +++ b/terraform/bootstrap/data_iam_policy_document_kms_key_s3.tf @@ -0,0 +1,46 @@ +data "aws_iam_policy_document" "kms_key_s3" { + statement { + sid = "AllowLocalIAMAdministration" + effect = "Allow" + + actions = [ + "*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = [ + "arn:aws:iam::${var.aws_account_id}:root", + ] + } + } + + statement { + sid = "AllowManagedAccountsToUse" + effect = "Allow" + + actions = [ + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyPair", + "kms:GenerateDataKeyPairWithoutPlaintext", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:ReEncrypt", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = local.ro_principals + } + } +} diff --git a/terraform/bootstrap/dynamodb_table.tf b/terraform/bootstrap/dynamodb_table.tf new file mode 100644 index 0000000..a5510f8 --- /dev/null +++ b/terraform/bootstrap/dynamodb_table.tf @@ -0,0 +1,26 @@ +resource "aws_dynamodb_table" "tfscaffold" { + name = var.bucket_name + hash_key = "LockID" + billing_mode = "PAY_PER_REQUEST" + + attribute { + name = "LockID" + type = "S" + } + + point_in_time_recovery { + enabled = true + } + + server_side_encryption { + enabled = true + kms_key_arn = aws_kms_key.s3.arn + } + + tags = merge( + local.default_tags, + { + Name = var.bucket_name + }, + ) +} diff --git a/terraform/bootstrap/kms_key_s3.tf b/terraform/bootstrap/kms_key_s3.tf new file mode 100644 index 0000000..d8a9ff4 --- /dev/null +++ b/terraform/bootstrap/kms_key_s3.tf @@ -0,0 +1,16 @@ +resource "aws_kms_key" "s3" { + description = "tfscaffold Bootstrap S3 Bucket" + deletion_window_in_days = 10 + enable_key_rotation = true + + policy = data.aws_iam_policy_document.kms_key_s3.json + + # This does not use default tag map merging because bootstrapping is special + # You should use default tag map merging elsewhere + tags = merge( + local.default_tags, + { + Name = "tfscaffold Bootstrap S3 Bucket" + } + ) +} diff --git a/terraform/bootstrap/locals.tf b/terraform/bootstrap/locals.tf new file mode 100644 index 0000000..1449f88 --- /dev/null +++ b/terraform/bootstrap/locals.tf @@ -0,0 +1,13 @@ +locals { + ro_principals = compact(distinct(flatten([ + var.tfscaffold_ro_principals, + "arn:aws:iam::${var.aws_account_id}:root", + ]))) + + default_tags = { + "tfscaffold:Environment" = var.environment + "tfscaffold:Project" = var.project + "tfscaffold:Component" = var.component + "tfscaffold:Account" = var.aws_account_id + } +} diff --git a/terraform/bootstrap/outputs.tf b/terraform/bootstrap/outputs.tf new file mode 100644 index 0000000..05b4902 --- /dev/null +++ b/terraform/bootstrap/outputs.tf @@ -0,0 +1,23 @@ +output "bucket_name" { + value = aws_s3_bucket.bucket.id +} + +output "bucket_policy" { + value = data.aws_iam_policy_document.bucket.json +} + +output "bucket_arn" { + value = aws_s3_bucket.bucket.arn +} + +output "kms_key_arn" { + value = aws_kms_key.s3.arn +} + +output "kms_key_id" { + value = aws_kms_key.s3.id +} + +output "kms_key_policy" { + value = data.aws_iam_policy_document.kms_key_s3.json +} diff --git a/terraform/bootstrap/provider_aws.tf b/terraform/bootstrap/provider_aws.tf new file mode 100644 index 0000000..02a8858 --- /dev/null +++ b/terraform/bootstrap/provider_aws.tf @@ -0,0 +1,12 @@ +# The default AWS provider in the default region +provider "aws" { + region = var.region + + # For no reason other than redundant safety + # we only allow the use of the AWS Account + # specified 
in the environment variables. + # This helps to prevent accidents. + allowed_account_ids = [ + var.aws_account_id, + ] +} diff --git a/terraform/bootstrap/s3_bucket.tf b/terraform/bootstrap/s3_bucket.tf new file mode 100644 index 0000000..5d5e092 --- /dev/null +++ b/terraform/bootstrap/s3_bucket.tf @@ -0,0 +1,14 @@ +resource "aws_s3_bucket" "bucket" { + bucket = var.bucket_name + + force_destroy = false + + # This does not use default tag map merging because bootstrapping is special + # You should use default tag map merging elsewhere + tags = merge( + local.default_tags, + { + Name = "Terraform Scaffold State File Bucket for account ${var.aws_account_id} in region ${var.region}" + } + ) +} diff --git a/terraform/bootstrap/s3_bucket_lifecycle_configuration.tf b/terraform/bootstrap/s3_bucket_lifecycle_configuration.tf new file mode 100644 index 0000000..4e173b6 --- /dev/null +++ b/terraform/bootstrap/s3_bucket_lifecycle_configuration.tf @@ -0,0 +1,26 @@ +resource "aws_s3_bucket_lifecycle_configuration" "bucket" { + bucket = aws_s3_bucket.bucket.id + + rule { + id = "bootstrap" + status = "Enabled" + + filter { + prefix = "" + } + + noncurrent_version_transition { + noncurrent_days = "30" + storage_class = "STANDARD_IA" + } + + noncurrent_version_transition { + noncurrent_days = "60" + storage_class = "GLACIER" + } + + noncurrent_version_expiration { + noncurrent_days = "90" + } + } +} diff --git a/terraform/bootstrap/s3_bucket_ownership_controls.tf b/terraform/bootstrap/s3_bucket_ownership_controls.tf new file mode 100644 index 0000000..fc4a359 --- /dev/null +++ b/terraform/bootstrap/s3_bucket_ownership_controls.tf @@ -0,0 +1,7 @@ +resource "aws_s3_bucket_ownership_controls" "bucket" { + bucket = aws_s3_bucket.bucket.id + + rule { + object_ownership = "BucketOwnerEnforced" + } +} diff --git a/terraform/bootstrap/s3_bucket_policy.tf b/terraform/bootstrap/s3_bucket_policy.tf new file mode 100644 index 0000000..d12922a --- /dev/null +++ b/terraform/bootstrap/s3_bucket_policy.tf @@ -0,0 +1,8 @@ +resource "aws_s3_bucket_policy" "bucket" { + bucket = aws_s3_bucket.bucket.id + policy = data.aws_iam_policy_document.bucket.json + + depends_on = [ + aws_s3_bucket_public_access_block.bucket, + ] +} diff --git a/terraform/bootstrap/s3_bucket_public_access_block.tf b/terraform/bootstrap/s3_bucket_public_access_block.tf new file mode 100644 index 0000000..d134b31 --- /dev/null +++ b/terraform/bootstrap/s3_bucket_public_access_block.tf @@ -0,0 +1,8 @@ +resource "aws_s3_bucket_public_access_block" "bucket" { + bucket = aws_s3_bucket.bucket.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} diff --git a/terraform/bootstrap/s3_bucket_server_side_encryption_configuration.tf b/terraform/bootstrap/s3_bucket_server_side_encryption_configuration.tf new file mode 100644 index 0000000..5733d98 --- /dev/null +++ b/terraform/bootstrap/s3_bucket_server_side_encryption_configuration.tf @@ -0,0 +1,12 @@ +resource "aws_s3_bucket_server_side_encryption_configuration" "bucket" { + bucket = aws_s3_bucket.bucket.id + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.s3.arn + sse_algorithm = "aws:kms" + } + + bucket_key_enabled = true + } +} diff --git a/terraform/bootstrap/s3_bucket_versioning.tf b/terraform/bootstrap/s3_bucket_versioning.tf new file mode 100644 index 0000000..80c1ab9 --- /dev/null +++ b/terraform/bootstrap/s3_bucket_versioning.tf @@ -0,0 +1,7 @@ +resource "aws_s3_bucket_versioning" "bucket" { + 
bucket = aws_s3_bucket.bucket.id + + versioning_configuration { + status = "Enabled" + } +} diff --git a/terraform/bootstrap/variables.tf b/terraform/bootstrap/variables.tf new file mode 100644 index 0000000..d7b1f44 --- /dev/null +++ b/terraform/bootstrap/variables.tf @@ -0,0 +1,37 @@ +variable "project" { + type = string + description = "The name of the Project we are bootstrapping tfscaffold for" +} + +variable "aws_account_id" { + type = string + description = "The AWS Account ID into which we are bootstrapping tfscaffold" +} + +variable "region" { + type = string + description = "The AWS Region into which we are bootstrapping tfscaffold" +} + +variable "environment" { + type = string + description = "The name of the environment for the bootstrapping process; which is always bootstrap" + default = "bootstrap" +} + +variable "component" { + type = string + description = "The name of the component for the bootstrapping process; which is always bootstrap" + default = "bootstrap" +} + +variable "bucket_name" { + type = string + description = "The name to use for the tfscaffold bucket. This should be provided from tfscaffold shell, not environment or group tfvars" +} + +variable "tfscaffold_ro_principals" { + type = list(string) + description = "A list of Principals permitted to ListBucket and GetObject for Remote State purposes. Normally the root principal of the account" + default = [] +} diff --git a/terraform/bootstrap/versions.tf b/terraform/bootstrap/versions.tf new file mode 100644 index 0000000..87dc6a9 --- /dev/null +++ b/terraform/bootstrap/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.61.0" + } + } + + required_version = ">= 0.14.7" +} diff --git a/terraform/components/.gitkeep b/terraform/components/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/terraform/components/dnsroot/.terraform-version b/terraform/components/dnsroot/.terraform-version new file mode 100644 index 0000000..631f790 --- /dev/null +++ b/terraform/components/dnsroot/.terraform-version @@ -0,0 +1 @@ +latest:^1\.8\. diff --git a/terraform/components/dnsroot/locals_tfscaffold.tf b/terraform/components/dnsroot/locals_tfscaffold.tf new file mode 100644 index 0000000..b7cf321 --- /dev/null +++ b/terraform/components/dnsroot/locals_tfscaffold.tf @@ -0,0 +1,44 @@ +locals { + terraform_state_bucket = format( + "%s-tfscaffold-%s-%s", + var.project, + var.aws_account_id, + var.region, + ) + + csi = replace( + format( + "%s-%s-%s", + var.project, + var.environment, + var.component, + ), + "_", + "", + ) + + # CSI for use in resources with a global namespace, i.e. 
S3 Buckets + csi_global = replace( + format( + "%s-%s-%s-%s-%s", + var.project, + var.aws_account_id, + var.region, + var.environment, + var.component, + ), + "_", + "", + ) + + default_tags = merge( + var.default_tags, + { + Project = var.project + Environment = var.environment + Component = var.component + Group = var.group + Name = local.csi + }, + ) +} diff --git a/terraform/components/dnsroot/provider_aws.tf b/terraform/components/dnsroot/provider_aws.tf new file mode 100644 index 0000000..e66f225 --- /dev/null +++ b/terraform/components/dnsroot/provider_aws.tf @@ -0,0 +1,17 @@ +provider "aws" { + region = var.region + + allowed_account_ids = [ + var.aws_account_id, + ] + + default_tags { + tags = { + Project = var.project + Environment = var.environment + Component = var.component + Group = var.group + Name = local.csi + } + } +} diff --git a/terraform/components/dnsroot/route53_delegation_set_main.tf b/terraform/components/dnsroot/route53_delegation_set_main.tf new file mode 100644 index 0000000..d0c41db --- /dev/null +++ b/terraform/components/dnsroot/route53_delegation_set_main.tf @@ -0,0 +1,3 @@ +resource "aws_route53_delegation_set" "root" { + reference_name = var.root_domain_name +} diff --git a/terraform/components/dnsroot/route53_zone_main.tf b/terraform/components/dnsroot/route53_zone_main.tf new file mode 100644 index 0000000..c1bbd8b --- /dev/null +++ b/terraform/components/dnsroot/route53_zone_main.tf @@ -0,0 +1,5 @@ +resource "aws_route53_zone" "root" { + name = var.root_domain_name + + delegation_set_id = aws_route53_delegation_set.root.id +} diff --git a/terraform/components/dnsroot/variables.tf b/terraform/components/dnsroot/variables.tf new file mode 100644 index 0000000..b4ff389 --- /dev/null +++ b/terraform/components/dnsroot/variables.tf @@ -0,0 +1,58 @@ +## +# Basic Required Variables for tfscaffold Components +## + +variable "project" { + type = string + description = "The name of the tfscaffold project" +} + +variable "environment" { + type = string + description = "The name of the tfscaffold environment" +} + +variable "aws_account_id" { + type = string + description = "The AWS Account ID (numeric)" +} + +variable "region" { + type = string + description = "The AWS Region" +} + +variable "group" { + type = string + description = "The group variables are being inherited from (often synonmous with account short-name)" +} + +## +# tfscaffold variables specific to this component +## + +# This is the only primary variable to have its value defined as +# a default within its declaration in this file, because the variables +# purpose is as an identifier unique to this component, rather +# then to the environment from where all other variables come. 
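+# For illustration (assuming the dev environment shipped in etc/): with
+# project = "nhs-notify", environment = "dev" and this default of "dnsroot",
+# local.csi in locals_tfscaffold.tf evaluates to "nhs-notify-dev-dnsroot".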
+variable "component" { + type = string + description = "The variable encapsulating the name of this component" + default = "dnsroot" +} + +variable "default_tags" { + type = map(string) + description = "A map of default tags to apply to all taggable resources within the component" + default = {} +} + + +## +# Variables specific to the "dnsroot"component +## + +variable "root_domain_name" { + type = string + description = "The root-level domain name to create a zone for onward delegation of subdomains to other services" +} diff --git a/terraform/components/dnsroot/versions.tf b/terraform/components/dnsroot/versions.tf new file mode 100644 index 0000000..ee15bad --- /dev/null +++ b/terraform/components/dnsroot/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.50" + } + } + + required_version = "~> 1.8.4" +} diff --git a/terraform/components/examplecomponent/main.tf b/terraform/components/examplecomponent/main.tf new file mode 100644 index 0000000..8680564 --- /dev/null +++ b/terraform/components/examplecomponent/main.tf @@ -0,0 +1 @@ +# Create root level resources here... diff --git a/terraform/components/examplecomponent/outputs.tf b/terraform/components/examplecomponent/outputs.tf new file mode 100644 index 0000000..9dcc2f3 --- /dev/null +++ b/terraform/components/examplecomponent/outputs.tf @@ -0,0 +1 @@ +# Define the outputs for the component. The outputs may well be referenced by other component in the same or different environments using terraform_remote_state data sources... diff --git a/terraform/components/examplecomponent/variables.tf b/terraform/components/examplecomponent/variables.tf new file mode 100644 index 0000000..7ba82b0 --- /dev/null +++ b/terraform/components/examplecomponent/variables.tf @@ -0,0 +1 @@ +# Define the variables that will be initialised in etc/{env,versions}__.tfvars... diff --git a/terraform/etc/.gitkeep b/terraform/etc/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/terraform/etc/env_eu-west-2_dev.tfvars b/terraform/etc/env_eu-west-2_dev.tfvars new file mode 100644 index 0000000..862a940 --- /dev/null +++ b/terraform/etc/env_eu-west-2_dev.tfvars @@ -0,0 +1,6 @@ +environment = "dev" +group = null + +aws_account_id = "767397753705" + +root_domain_name = "dev.nhsnotify.national.nhs.uk" diff --git a/terraform/etc/env_eu-west-2_poc-epc.tfvars b/terraform/etc/env_eu-west-2_poc-epc.tfvars new file mode 100644 index 0000000..df71a0b --- /dev/null +++ b/terraform/etc/env_eu-west-2_poc-epc.tfvars @@ -0,0 +1,6 @@ +environment = "poc-epc" +group = null + +aws_account_id = "767397886959" + +root_domain_name = "poc-epc.example" diff --git a/terraform/etc/eu-west-2.tfvars b/terraform/etc/eu-west-2.tfvars new file mode 100644 index 0000000..53cd511 --- /dev/null +++ b/terraform/etc/eu-west-2.tfvars @@ -0,0 +1,2 @@ +# Specific to region within project/AWS Account +region = "eu-west-2" diff --git a/terraform/etc/global.tfvars b/terraform/etc/global.tfvars new file mode 100644 index 0000000..dff8bf4 --- /dev/null +++ b/terraform/etc/global.tfvars @@ -0,0 +1,6 @@ +# Specific to whole project / AWS Account +# CHANGEME: these should be set for the project you are working on +# project should ideally be as short as possible whilst being meaningful as it will be used in resource naming! +# aws_account_id should be set to the AWS account ID you are running Terraform in the context of - you will get errors otherwise... 
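+# Example with hypothetical values: project = "myproj" and aws_account_id = "111111111111"
+# would give a default state bucket of "myproj-tfscaffold-111111111111-eu-west-2" in eu-west-2.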
+project = "nhs-notify" +aws_account_id = "012345678901" diff --git a/terraform/etc/group_examplegroup.tfvars b/terraform/etc/group_examplegroup.tfvars new file mode 100644 index 0000000..5949382 --- /dev/null +++ b/terraform/etc/group_examplegroup.tfvars @@ -0,0 +1 @@ +# Variables shared by any environment that chooses to be subscribed to it diff --git a/terraform/etc/versions_eg-region-1_exampleenv.tfvars b/terraform/etc/versions_eg-region-1_exampleenv.tfvars new file mode 100644 index 0000000..31b0602 --- /dev/null +++ b/terraform/etc/versions_eg-region-1_exampleenv.tfvars @@ -0,0 +1,2 @@ +# Define variable values to be fed into components in the components directory that will each form a part of the examplenv environment... +my_example_docker_app_version = "0.0.1" diff --git a/terraform/modules/.gitkeep b/terraform/modules/.gitkeep new file mode 100644 index 0000000..e69de29
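
Example usage (a sketch based on the usage text in bin/terraform.sh; assumes AWS credentials for the target account are configured and the CHANGEME values in etc/global.tfvars have been set), run from the repository's terraform/ directory:

  # One-off bootstrap of the tfscaffold state bucket, KMS key and DynamoDB lock table
  bin/terraform.sh --bootstrap -p nhs-notify -r eu-west-2 -a apply

  # Plan, then apply, the dnsroot component against the dev environment
  bin/terraform.sh -p nhs-notify -e dev -r eu-west-2 -c dnsroot -a plan
  bin/terraform.sh -p nhs-notify -e dev -r eu-west-2 -c dnsroot -a apply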