diff --git a/README.md b/README.md index 7ab25a7..1ff1282 100755 --- a/README.md +++ b/README.md @@ -1,22 +1,22 @@ -This Terraform deploys a stateless containerised sshd bastion service on AWS with IAM based authentication: -=================================== +# This Terraform deploys a stateless containerised sshd bastion service on AWS with IAM based authentication: -**This module requires Terraform 0.13** +**This module requires Terraform >= 0.15 (including 1.x)** -- Terraform 0.12.x was *previously* supported with module version to ~> v5.0 -- Terraform 0.11.x was *previously* supported with module version to ~> v4.0 +- Terraform 0.13.x was _previously_ supported with module version to ~> v6.1 +- Terraform 0.12.x was _previously_ supported with module version to ~> v5.0 +- Terraform 0.11.x was _previously_ supported with module version to ~> v4.0 **N.B. If you are using a newer version of this module when you have an older version deployed, please review the changelog!** # Overview -This plan provides socket-activated sshd-containers with one container instantiated per connection and destroyed on connection termination or else after 12 hours- to deter things like reverse tunnels etc. The host assumes an IAM role, inherited by the containers, allowing it to query IAM users and request their ssh public keys lodged with AWS. +This plan provides socket-activated sshd-containers with one container instantiated per connection and destroyed on connection termination, or else after 12 hours, to deter things like reverse tunnels. The host assumes an IAM role, inherited by the containers, allowing it to query IAM users and request their ssh public keys lodged with AWS. **It is essential to limit incoming service traffic to whitelisted ports.** If you do not then internet background noise will exhaust the host resources and/or lead to rate limiting from Amazon on the IAM identity calls, resulting in denial of service. **It is possible to replace the components in userdata and the base AMI with components of your own choosing. The following describes deployment with all sections as provided by module defaults.** -The actual call for public keys is made with a [GO binary](https://github.com/Fullscreen/iam-authorized-keys-command), which is built during host instance intial launch and made available via shared volume in the docker image. In use the Docker container queries AWS for users with ssh keys at runtime, creates local linux user accounts for them and handles their login. The users who may access the bastion service may be restricted to membership of a defined AWS IAM group which is not set up or managed by this plan. When the connection is closed the container exits. This means that users log in _as themselves_ and manage their own ssh keys using the AWS web console or CLI. For any given session they will arrive in a vanilla Ubuntu container with passwordless sudo and can install whatever applications and frameworks might be required for that session. Because the IAM identity checking and user account population is done at container run time and the containers are called on demand, there is no delay between creating an account with a public ssh key on AWS and being able to access the bastion. If users have more than one ssh public key then their account will be set up so that any of them may be used- AWS allows up to 5 keys per user. Aside from the resources provided by AWS and remote public repositories this plan is entirely self contained. There is no reliance on registries, build chains etc. 
+The actual call for public keys is made with a [Go binary](https://github.com/Fullscreen/iam-authorized-keys-command), which is built during host instance initial launch and made available via shared volume in the docker image. In use the Docker container queries AWS for users with ssh keys at runtime, creates local Linux user accounts for them and handles their login. The users who may access the bastion service may be restricted to membership of a defined AWS IAM group which is not set up or managed by this plan. When the connection is closed the container exits. This means that users log in _as themselves_ and manage their own ssh keys using the AWS web console or CLI. For any given session they will arrive in a vanilla Ubuntu container with passwordless sudo and can install whatever applications and frameworks might be required for that session. Because the IAM identity checking and user account population is done at container run time and the containers are called on demand, there is no delay between creating an account with a public ssh key on AWS and being able to access the bastion. If users have more than one ssh public key then their account will be set up so that any of them may be used; AWS allows up to 5 keys per user. Aside from the resources provided by AWS and remote public repositories this plan is entirely self contained. There is no reliance on registries, build chains etc. # This plan is also published on the Terraform Community Module Registry @@ -32,63 +32,97 @@ Ivan Mesic has kindly contributed an example use of this module creating a VPC a You can **specify a custom base AMI** to use for the service host if you wish with `var.custom_ami_id`. Tested and working using Ubuntu 18.04 as an example ;) - **Userdata has been divided into sections which are individually applicable**. Each is a HEREDOC and may be excluded by assigning any non-empty value to the relevant section variable. The value given is used simply for a logic test and not passed into userdata. If you ignore all of these variables then historic/ default behaviour continues and everything is built on the host instance on first boot (allow 3 minutes on t2.medium). +**Userdata has been divided into sections which are individually applicable**. Each is a HEREDOC and may be excluded by assigning any non-empty value to the relevant section variable. The value given is used simply for a logic test and not passed into userdata. If you ignore all of these variables then historic/default behaviour continues and everything is built on the host instance on first boot (allow 3 minutes on t2.medium). 
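For orientation, a minimal invocation of the module looks like the sketch below. It is distilled from the bundled `examples/full-with-public-ip` example; the VPC and subnet references, CIDR range and instance type are placeholders to adapt to your own environment.
```terraform
module "ssh-bastion-service" {
  source = "joshuamkite/ssh-bastion-service/aws"

  aws_region       = "eu-west-1"
  environment_name = "demo"

  # Placeholder network references - substitute your own VPC and subnets
  vpc         = aws_vpc.example.id
  subnets_asg = flatten([aws_subnet.example.*.id])
  subnets_lb  = flatten([aws_subnet.example.*.id])

  # Whitelist your own address range(s); do not leave the service open
  cidr_blocks_whitelist_service = ["203.0.113.0/24"]

  bastion_instance_types = ["t2.micro"]
}
```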
The variables for these userdata sections are: -* **custom_ssh_populate** - any value excludes default ssh_populate script used on container launch from userdata +- **custom_ssh_populate** - any value excludes default ssh_populate script used on container launch from userdata -* **custom_authorized_keys_command** - any value excludes default Go binary iam-authorized-keys built from source from userdata +- **custom_authorized_keys_command** - any value excludes default Go binary iam-authorized-keys built from source from userdata -* **custom_docker_setup** - any value excludes default docker installation and container build from userdata +- **custom_docker_setup** - any value excludes default docker installation and container build from userdata -* **custom_systemd** - any value excludes default systemd and hostname change from userdata +- **custom_systemd** - any value excludes default systemd and hostname change from userdata -If you exclude any section then you must replace it with equivalent functionality, either in your base AMI or extra_user_data for a working service. Especially if you are not replacing all sections then be mindful that the systemd service expects docker to be installed and to be able to call the docker container as 'sshd_worker'. The service container in turn references the 'ssh_populate' script which calls 'iam-authorized-keys' from a specific location. +- **extra_user_data\*** - (optional, several variables) you may supply your own user data here - appended following above sections + +If you exclude any section then you must replace it with equivalent functionality, either in your base AMI or `extra_user_data*` for a working service. Especially if you are not replacing all sections then be mindful that the systemd service expects docker to be installed and to be able to call the docker container as `sshd_worker`. The service container in turn references the `ssh_populate` script which calls `iam-authorized-keys` from a specific location. # Ability to assume a role in another account -The ability to assume a role to source IAM users from another account has been integrated with conditional logic. If you supply the ARN for a role for the bastion service to assume (typically in another account) ${var.assume_role_arn} then this plan will create an instance profile, role and policy along with each bastion to make use of it. A matching sample policy and trust relationship is given as an output from the plan to assist with application in the other account. If you do not supply this arn then this plan presumes IAM lookups in the same account and creates an appropriate instance profile, role and policies for each bastion in the same AWS account. 'Each bastion' here refers to a combination of environment, AWS account, AWS region and VPCID determined by deployment. This is a high availabilty service, but if you are making more than one independent deployment using this same module within such a combination then you can specify "service_name" to avoid resource collision. +The ability to assume a role to source IAM users from another account has been integrated with conditional logic. If you supply the ARN for a role for the bastion service to assume (typically in another account) via `${var.assume_role_arn}` then this plan will create an instance profile, role and policy along with each bastion to make use of it. A matching sample policy and trust relationship is given as an output from the plan to assist with application in the other account. 
If you do not supply this ARN then this plan presumes IAM lookups in the same account and creates an appropriate instance profile, role and policies for each bastion in the same AWS account. 'Each bastion' here refers to a combination of environment, AWS account, AWS region and VPCID determined by deployment. This is a high availability service, but if you are making more than one independent deployment using this same module within such a combination then you can specify "service_name" to avoid resource collision. -If you are seeking a solution for ECS hosts then you are recommended to the [Widdix project](https://github.com/widdix/aws-ec2-ssh). This offers IAM authentication for local users with a range of features suitable for a long-lived stateful host built as an AMI or with configuration management tools. +If you are seeking a solution for ECS hosts then we recommend the [Widdix project](https://github.com/widdix/aws-ec2-ssh). This offers IAM authentication for local users with a range of features suitable for a long-lived stateful host built as an AMI or with configuration management tools. # Service deployed by this plan (presuming default userdata) -This plan creates a network load balancer and autoscaling group with an **optional** DNS entry and an **optional** public IP for the service. +This plan creates a network load balancer and autoscaling group with an **optional** DNS entry and an **optional** public IP for the service. ## Default, partial and complete customisation of hostname -You can overwrite the suggested hostname entirely with `var.bastion_host_name.` - -You can _instead_ customise just the last part of the hostname if you like with `bastion_vpc_name`. By default this is the vpc ID via the magic default value of 'vpc_id' with the format - - name = "${var.environment_name}-${data.aws_region.current.name}-${var.vpc}-bastion-service.${var.dns_domain}" +You can overwrite the suggested hostname entirely with +```terraform +var.bastion_host_name +``` +You can _instead_ customise just the last part of the hostname if you like with +```terraform +bastion_vpc_name +``` +By default this is the VPC ID via the magic default value of 'vpc_id' with the format +```terraform +name = "${var.environment_name}-${data.aws_region.current.name}-${var.vpc}-bastion-service.${var.dns_domain}" +``` e.g. - module default: `dev-ap-northeast-1-vpc-1a23b456d7890-bastion-service.yourdomain.com` - -but you can pass a custom string, or an empty value to omit this. e.g. - - `bastion_vpc_name = "compute"` gives `dev-ap-northeast-1-compute-bastion-service.yourdomain.com` +module default: +``` +dev-ap-northeast-1-vpc-1a23b456d7890-bastion-service.yourdomain.com +``` - `bastion_vpc_name = ""` gives ` dev-ap-northeast-1-bastion-service.yourdomain.com` +but you can pass a custom string, or an empty value to omit this. e.g. +```terraform +bastion_vpc_name = "compute" +``` +gives +``` +dev-ap-northeast-1-compute-bastion-service.yourdomain.com +``` +and +```terraform +bastion_vpc_name = "" +``` +gives +``` +dev-ap-northeast-1-bastion-service.yourdomain.com +``` In any event this ensures a consistent and obvious naming format for each combination of AWS account and region that does not collide if multiple VPCs are deployed per region. -The container shell prompt is set similarly but with a systemd incremented counter, e.g. for 'aws_user' - - aws_user@dev-eu-west-1-vpc_12345688-172:~$ - +The container shell prompt is set similarly but with a systemd incremented counter, e.g. 
for user `aws_user` you might see +```bash +aws_user@dev-eu-west-1-vpc_12345688-172:~$ +``` and a subsequent container might have - - aws_user@dev-eu-west-1-vpc_12345688-180:~$ - +```bash +aws_user@dev-eu-west-1-vpc_12345688-180:~$ +``` +In the case that +```terraform +bastion_vpc_name = "" +``` +the service container shell prompt is set similar to +```bash +you@dev-ap-northeast-1_3 +``` # In use -It is considered normal to see very highly incremented counters if the load blancer health checks are conducted on the service port. +It is considered normal to see very highly incremented counters if the load balancer health checks are conducted on the service port. **It is essential to limit incoming service traffic to whitelisted ports.** If you do not then internet background noise will exhaust the host resources and/or lead to rate limiting from Amazon on the IAM identity calls, resulting in denial of service. @@ -117,40 +151,44 @@ Debian was chosen originally because the socket activation requires systemd but The host sshd is available on port 2222 and uses standard EC2 ssh keying. **The default login username for Debian AMIs is 'admin'**. If you do not whitelist any access to this port directly from the outside world (plan default) then it may be convenient to access from a container during development, e.g. with - sudo apt install -y curl; ssh -p2222 admin@`curl -s http://169.254.169.254/latest/meta-data/local-ipv4` - +```bash +sudo apt install -y curl; ssh -p2222 admin@`curl -s http://169.254.169.254/latest/meta-data/local-ipv4` +``` **Make sure that your agent forwarding is active before attempting this!** It is advised to deploy to production _without_ EC2 keys to increase security. -If you are interested in specifying your own AMI then be aware that there are many subtle differences in systemd implemntations between different versions, e.g. it is not possible to use Amazon Linux 2 because we need (from Systemd): +If you are interested in specifying your own AMI then be aware that there are many subtle differences in systemd implementations between different versions, e.g. it is not possible to use Amazon Linux 2 because we need (from Systemd): -* RunTimeMaxSec to limit the service container lifetime. This was introduced with Systemd version 229 (feb 2016) whereas Amazon Linux 2 uses version 219 (Feb 2015) This is a critical requirement. -* Ability to pass through hostname and increment (-- hostname foo%i) from systemd to docker, which does not appear to be supported on Amazon Linux 2. Ths is a 'nice to have' feature. +- `RuntimeMaxSec` to limit the service container lifetime. This was introduced with Systemd version 229 (Feb 2016) whereas Amazon Linux 2 uses version 219 (Feb 2015). This is a critical requirement. +- Ability to pass through hostname and increment (`--hostname foo%i`) from systemd to docker, which does not appear to be supported on Amazon Linux 2. This is a 'nice to have' feature. ## IAM user names and Linux user names -*with thanks to michaelwittig and the [Widdix project](https://github.com/widdix/aws-ec2-ssh)* +_with thanks to michaelwittig and the [Widdix project](https://github.com/widdix/aws-ec2-ssh)_ IAM user names may be up to 64 characters long. Linux user names may only be up to 32 characters long. 
Allowed characters for IAM user names are: -> alphanumeric, including the following common characters: plus (+), equal (=), comma (,), period (.), at (@), underscore (_), and hyphen (-). + +> alphanumeric, including the following common characters: plus (+), equal (=), comma (,), period (.), at (@), underscore (\_), and hyphen (-). Allowed characters for Linux user names are (POSIX ("Portable Operating System Interface for Unix") standard (IEEE Standard 1003.1 2008)): -> alphanumeric, including the following common characters: period (.), underscore (_), and hyphen (-). + +> alphanumeric, including the following common characters: period (.), underscore (\_), and hyphen (-). Therefore, characters that are allowed in IAM user names but not in Linux user names: + > plus (+), equal (=), comma (,), at (@). This solution will use the following mapping for those special characters in IAM usernames when creating Linux user accounts on the sshd_worker container: -* `+` => `plus` -* `=` => `equal` -* `,` => `comma` -* `@` => `at` +- `+` => `plus` +- `=` => `equal` +- `,` => `comma` +- `@` => `at` So for example if we have an IAM user called `test@+=,test` (which uses all of the disputed characters) @@ -160,56 +198,59 @@ this username would translate to `testatplusequalcommatest` and they would need ## Users should be aware that: -* They are logging on _as themselves_ using an identity _based on_ their AWS IAM identity -* They must manage their own ssh keys using the AWS interface(s), e.g. in the web console under **IAM/Users/Security credentials** and 'Upload SSH public key'. -* The ssh server key is set at container build time. This means that it will change whenever the bastion host is respawned +- They are logging on _as themselves_ using an identity _based on_ their AWS IAM identity +- They must manage their own ssh keys using the AWS interface(s), e.g. in the web console under **IAM/Users/Security credentials** and 'Upload SSH public key'. +- The ssh server key is set at container build time. This means that it will change whenever the bastion host is respawned. The following is referenced in "message of the day" on the container: -* They have an Ubuntu userland with passwordless sudo within the container, so they can install whatever they find useful for that session -* Every connection is given a newly instanced container, nothing persists to subsequent connections. Even if they make a second connection to the service from the same machine at the same time it will be a seperate container. -* When they close their connection that container terminates and is removed -* If they leave their connection open then the host will kill the container after 12 hours +- They have an Ubuntu userland with passwordless sudo within the container, so they can install whatever they find useful for that session +- Every connection is given a newly instanced container, nothing persists to subsequent connections. Even if they make a second connection to the service from the same machine at the same time it will be a separate container. +- When they close their connection that container terminates and is removed +- If they leave their connection open then the host will kill the container after 12 hours ## Logging The sshd-worker container is launched with `-v /dev/log:/dev/log`. This causes logging information to be recorded in the host systemd journal which is not directly accessible from the container. It is thus simple to see who logged in and when by interrogating the host, e.g. 
- journalctl | grep 'Accepted publickey' - +```bash +journalctl | grep 'Accepted publickey' +``` gives information such as +``` +April 27 14:05:02 dev-eu-west-1-bastion-host sshd[7294]: Accepted publickey for aws_user from 192.168.168.0 port 65535 ssh2: RSA SHA256:***************************** +``` +**N.B.** It appears that calling client IP addresses are no longer visible - see [issue 45](https://github.com/joshuamkite/terraform-aws-ssh-bastion-service/issues/45#issuecomment-1019509753). The reason for this is unclear. - April 27 14:05:02 dev-eu-west-1-bastion-host sshd[7294]: Accepted publickey for aws_user from 192.168.168.0 port 65535 ssh2: RSA SHA256:***************************** - -Starting with release 3.8 it is possible to use the output giving the name of the role created for the service and to appeand addtional user data. This means that you can call this module from a plan specifiying your preferred logging solution, e.g. AWS cloudwatch. +Starting with release 3.8 it is possible to use the output giving the name of the role created for the service and to append additional user data. This means that you can call this module from a plan specifying your preferred logging solution, e.g. AWS CloudWatch. ## Note that: -* ssh keys are called only at login- if an account or ssh public key is deleted from AWS whilst a user is logged in then that session will continue until otherwise terminated. +- ssh keys are called only at login; if an account or ssh public key is deleted from AWS whilst a user is logged in then that session will continue until otherwise terminated. # Notes for deployment -Load Balancer health check port may be optionally set to either port 22 (containerised service) or port 2222 (EC2 host sshd). Port 2222 is the default. If you are deploying a large number of bastion instances, all of them checking into the same parent account for IAM queries in reponse to load balancer health checks on port 22 causes IAM rate limiting from AWS. Using the modified EC2 host sshd of port 2222 avoids this issue, is recommended for larger deployments and is now default. The host sshd is set to port 2222 as part of the service setup so this heathcheck is not entirely invalid. Security group rules, target groups and load balancer listeners are conditionally created to support any combination of access/healthcheck on port 2222 or not. +Load Balancer health check port may be optionally set to either port 22 (containerised service) or port 2222 (EC2 host sshd). Port 2222 is the default. If you are deploying a large number of bastion instances, all of them checking into the same parent account for IAM queries in response to load balancer health checks on port 22 causes IAM rate limiting from AWS. Using the modified EC2 host sshd of port 2222 avoids this issue, is recommended for larger deployments and is now the default. The host sshd is set to port 2222 as part of the service setup so this healthcheck is not entirely invalid. Security group rules, target groups and load balancer listeners are conditionally created to support any combination of access/healthcheck on port 2222 or not. -You can supply list of one or more security groups to attach to the host instance launch configuration within the module if you wish. This can be supplied together with or instead of a whitelisted range of CIDR blocks. 
It may be useful in an enterprise setting to have security groups with rules managed separately from the bastion plan but of course if you do not assign either a suitable security group or whitelist then you may not be able to reach the service! +You can supply a list of one or more security groups to attach to the host instance launch configuration within the module if you wish. This can be supplied together with or instead of a whitelisted range of CIDR blocks. It may be useful in an enterprise setting to have security groups with rules managed separately from the bastion plan but of course if you do not assign either a suitable security group or whitelist then you may not be able to reach the service! ## Components (using default userdata) **EC2 Host OS (debian) with:** -* Systemd docker unit -* Systemd service template unit -* IAM Profile connected to EC2 host -* golang -* go binary compiled from code included in plan and supplied as user data - [sourced from Fullscreen project](https://github.com/Fullscreen/iam-authorized-keys-command) +- Systemd docker unit +- Systemd service template unit +- IAM Profile connected to EC2 host +- golang +- go binary compiled from code included in plan and supplied as user data - [sourced from Fullscreen project](https://github.com/Fullscreen/iam-authorized-keys-command) **IAM Role** This and all of the following are prefixed with `${var.service_name}` to ensure uniqueness. An appropriate set is created depending on whether or not an external role to assume is referenced for IAM identity checks. -* IAM role -* IAM policies -* IAM instance profile +- IAM role +- IAM policies +- IAM instance profile **Docker container** 'sshd_worker' - built at host launch time using a generic Ubuntu image; we add awscli, sshd and sudo. @@ -217,31 +258,31 @@ This and all of the following are prefixed with `${var.service_name}` to ensure The files in question on the host deploy thus: - /opt - ├── golang - │ ├── bin - │ ├── pkg - │ └── src - ├── iam_helper - │ ├── iam-authorized-keys-command - │ └── ssh_populate.sh - └── sshd_worker - └── Dockerfile - -* `golang` is the source and build directory for the go binary -* `iam-helper` is made available as a read-only volume to the docker container as /opt. -* `iam-authorized-keys-command` is the Go binary that gets the users and ssh public keys from aws - it is built during bastion deployment -* `ssh_populate.sh` is the container entry point and populates the local user accounts using the go binary -* `sshd_worker/Dockerfile` is obviously the docker build configuration. It uses Ubuntu 16.04/18.04 from the public Docker registry and installs additional public packages. + /opt + ├── golang + │ ├── bin + │ ├── pkg + │ └── src + ├── iam_helper + │ ├── iam-authorized-keys-command + │ └── ssh_populate.sh + └── sshd_worker + └── Dockerfile + +- `golang` is the source and build directory for the go binary +- `iam_helper` is made available as a read-only volume to the docker container as `/opt`. +- `iam-authorized-keys-command` is the Go binary that gets the users and ssh public keys from AWS - it is built during bastion deployment +- `ssh_populate.sh` is the container entry point and populates the local user accounts using the go binary +- `sshd_worker/Dockerfile` is the docker build configuration. It uses Ubuntu from the public Docker registry (16.04, 18.04 and 20.04 are tested; see `container_ubuntu_version`) and installs additional public packages. 
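Returning to the IAM-to-Linux username mapping described earlier, the documented translation can be reproduced with a shell one-liner. This is an illustrative sketch only; the module is assumed to perform the equivalent substitution when `ssh_populate.sh` creates the container accounts.
```bash
# Map the IAM-only special characters to their documented replacements
echo 'test@+=,test' | sed -e 's/+/plus/g' -e 's/=/equal/g' -e 's/,/comma/g' -e 's/@/at/g'
# prints: testatplusequalcommatest
```
Any of the IAM-only characters appearing anywhere in a username is replaced in the same way before the local account is created.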
## Sample policy for other accounts - + If you supply the ARN for an external role for the bastion service to assume `${var.assume_role_arn}` then a matching sample policy and trust relationship is given as an output from the plan to assist with application in that other account for typical operation. The DNS entry (if created) for the service is also displayed as an output of the format - - name = "${var.environment_name}-${data.aws_region.current.name}-${var.vpc}-bastion-service.${var.dns_domain}" - +```terraform +name = "${var.environment_name}-${data.aws_region.current.name}-${var.vpc}-bastion-service.${var.dns_domain}" +``` ## Inputs and Outputs These have been generated with [terraform-docs](https://github.com/segmentio/terraform-docs) @@ -250,70 +291,112 @@ These have been generated with [terraform-docs](https://github.com/segmentio/ter | Name | Version | |------|---------| -| terraform | >= 0.13 | +| [terraform](#requirement\_terraform) | >= 0.15 | ## Providers | Name | Version | |------|---------| -| aws | n/a | -| null | n/a | -| template | n/a | +| [aws](#provider\_aws) | 3.71.0 | +| [cloudinit](#provider\_cloudinit) | 2.2.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_autoscaling_group.bastion-service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource | +| [aws_iam_instance_profile.bastion_service_assume_role_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | +| [aws_iam_instance_profile.bastion_service_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | +| [aws_iam_policy.bastion_service_assume_role_in_parent](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.check_ssh_authorized_keys](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.bastion_service_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.bastion_service_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.bastion_service_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.check_ssh_authorized_keys](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_launch_template.bastion-service-host](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | +| [aws_lb.bastion-service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb) | resource | +| [aws_lb_listener.bastion-host](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener) | resource | +| [aws_lb_listener.bastion-service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener) | resource | +| [aws_lb_target_group.bastion-host](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_target_group) | resource | +| [aws_lb_target_group.bastion-service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_target_group) | resource | +| 
[aws_route53_record.bastion_service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_security_group.bastion_service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.bastion_host_out](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.host_ssh_in_cond](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.lb_healthcheck_in](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.service_ssh_in](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_ami.debian](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_default_tags.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | +| [aws_iam_policy_document.bastion_service_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.bastion_service_assume_role_in_parent](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.bastion_service_role_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.check_ssh_authorized_keys](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | +| [aws_subnet.lb_subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | +| [cloudinit_config.config](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| asg\_desired | Desired numbers of bastion-service hosts in ASG | `string` | `"1"` | no | -| asg\_max | Max numbers of bastion-service hosts in ASG | `string` | `"2"` | no | -| asg\_min | Min numbers of bastion-service hosts in ASG | `string` | `"1"` | no | -| assume\_role\_arn | arn for role to assume in separate identity account if used | `string` | `""` | no | -| aws\_profile | n/a | `string` | `""` | no | -| aws\_region | n/a | `any` | n/a | yes | -| bastion\_allowed\_iam\_group | Name IAM group, members of this group will be able to ssh into bastion instances if they have provided ssh key in their profile | `string` | `""` | no | -| bastion\_host\_name | The hostname to give to the bastion instance | `string` | `""` | no | -| bastion\_instance\_type | The virtual hardware to be used for the bastion service host | `string` | `"t2.micro"` | no | -| bastion\_service\_host\_key\_name | AWS ssh key \*.pem to be used for ssh access to the bastion service host | `string` | `""` | no | -| bastion\_vpc\_name | define the last part of the hostname, by default this is the vpc ID with magic default value of 'vpc\_id' but you can pass a custom string, or an empty value 
to omit this | `string` | `"vpc_id"` | no | -| cidr\_blocks\_whitelist\_host | range(s) of incoming IP addresses to whitelist for the HOST | `list(string)` | `[]` | no | -| cidr\_blocks\_whitelist\_service | range(s) of incoming IP addresses to whitelist for the SERVICE | `list(string)` | `[]` | no | -| container\_ubuntu\_version | ubuntu version to use for service container. Tested with 16.04; 18.04; 20.04 | `string` | `"20.04"` | no | -| custom\_ami\_id | id for custom ami if used | `string` | `""` | no | -| custom\_authorized\_keys\_command | any value excludes default Go binary iam-authorized-keys built from source from userdata | `string` | `""` | no | -| custom\_docker\_setup | any value excludes default docker installation and container build from userdata | `string` | `""` | no | -| custom\_ssh\_populate | any value excludes default ssh\_populate script used on container launch from userdata | `string` | `""` | no | -| custom\_systemd | any value excludes default systemd and hostname change from userdata | `string` | `""` | no | -| dns\_domain | The domain used for Route53 records | `string` | `""` | no | -| environment\_name | the name of the environment that we are deploying to, used in tagging. Overwritten if var.service\_name and var.bastion\_host\_name values are changed | `string` | `"staging"` | no | -| extra\_user\_data\_content | Extra user-data to add to the default built-in | `string` | `""` | no | -| extra\_user\_data\_content\_type | What format is content in - eg 'text/cloud-config' or 'text/x-shellscript' | `string` | `"text/x-shellscript"` | no | -| extra\_user\_data\_merge\_type | Control how cloud-init merges user-data sections | `string` | `"str(append)"` | no | -| lb\_healthcheck\_port | TCP port to conduct lb target group healthchecks. 
Acceptable values are 22 or 2222 | `string` | `"2222"` | no | -| lb\_healthy\_threshold | Healthy threshold for lb target group | `string` | `"2"` | no | -| lb\_interval | interval for lb target group health check | `string` | `"30"` | no | -| lb\_is\_internal | whether the lb will be internal | `string` | `false` | no | -| lb\_unhealthy\_threshold | Unhealthy threshold for lb target group | `string` | `"2"` | no | -| public\_ip | Associate a public IP with the host instance when launching | `bool` | `false` | no | -| route53\_fqdn | If creating a public DNS entry with this module then you may override the default constructed DNS entry by supplying a fully qualified domain name here which will be used verbatim | `string` | `""` | no | -| route53\_zone\_id | Route53 zoneId | `string` | `""` | no | -| security\_groups\_additional | additional security group IDs to attach to host instance | `list(string)` | `[]` | no | -| service\_name | Unique name per vpc for associated resources- set to some non-default value for multiple deployments per vpc | `string` | `"bastion-service"` | no | -| subnets\_asg | list of subnets for autoscaling group - availability zones must match subnets\_lb | `list(string)` | `[]` | no | -| subnets\_lb | list of subnets for load balancer - availability zones must match subnets\_asg | `list(string)` | `[]` | no | -| tags | AWS tags that should be associated with created resources | `map(string)` | `{}` | no | -| vpc | ID for Virtual Private Cloud to apply security policy and deploy stack to | `any` | n/a | yes | +| [asg\_desired](#input\_asg\_desired) | Desired numbers of bastion-service hosts in ASG | `string` | `"1"` | no | +| [asg\_max](#input\_asg\_max) | Max numbers of bastion-service hosts in ASG | `string` | `"2"` | no | +| [asg\_min](#input\_asg\_min) | Min numbers of bastion-service hosts in ASG | `string` | `"1"` | no | +| [assume\_role\_arn](#input\_assume\_role\_arn) | arn for role to assume in separate identity account if used | `string` | `""` | no | +| [autoscaling\_group\_enabled\_metrics](#input\_autoscaling\_group\_enabled\_metrics) | A list of CloudWatch metrics to collect on the autoscaling group. Permitted values include: GroupMinSize; GroupMaxSize; GroupDesiredCapacity; GroupInServiceInstances; GroupPendingInstances; GroupStandbyInstances; GroupTerminatingInstances; GroupTotalInstances | `list(string)` | `[]` | no | +| [aws\_profile](#input\_aws\_profile) | n/a | `string` | `""` | no | +| [aws\_region](#input\_aws\_region) | n/a | `any` | n/a | yes | +| [bastion\_allowed\_iam\_group](#input\_bastion\_allowed\_iam\_group) | Name IAM group, members of this group will be able to ssh into bastion instances if they have provided ssh key in their profile | `string` | `""` | no | +| [bastion\_ebs\_device\_name](#input\_bastion\_ebs\_device\_name) | Name of bastion instance block device | `string` | `"xvda"` | no | +| [bastion\_ebs\_size](#input\_bastion\_ebs\_size) | Size of EBS attached to the bastion instance | `number` | `8` | no | +| [bastion\_host\_name](#input\_bastion\_host\_name) | The hostname to give to the bastion instance | `string` | `""` | no | +| [bastion\_instance\_types](#input\_bastion\_instance\_types) | List of ec2 types for the bastion host, used by aws\_launch\_template (first from the list) and in aws\_autoscaling\_group | `list` |
[| no | +| [bastion\_service\_host\_key\_name](#input\_bastion\_service\_host\_key\_name) | AWS ssh key *.pem to be used for ssh access to the bastion service host | `string` | `""` | no | +| [bastion\_vpc\_name](#input\_bastion\_vpc\_name) | define the last part of the hostname, by default this is the vpc ID with magic default value of 'vpc\_id' but you can pass a custom string, or an empty value to omit this | `string` | `"vpc_id"` | no | +| [cidr\_blocks\_whitelist\_host](#input\_cidr\_blocks\_whitelist\_host) | range(s) of incoming IP addresses to whitelist for the HOST | `list(string)` | `[]` | no | +| [cidr\_blocks\_whitelist\_service](#input\_cidr\_blocks\_whitelist\_service) | range(s) of incoming IP addresses to whitelist for the SERVICE | `list(string)` | `[]` | no | +| [container\_ubuntu\_version](#input\_container\_ubuntu\_version) | ubuntu version to use for service container. Tested with 16.04; 18.04; 20.04 | `string` | `"20.04"` | no | +| [custom\_ami\_id](#input\_custom\_ami\_id) | id for custom ami if used | `string` | `""` | no | +| [custom\_authorized\_keys\_command](#input\_custom\_authorized\_keys\_command) | any value excludes default Go binary iam-authorized-keys built from source from userdata | `string` | `""` | no | +| [custom\_docker\_setup](#input\_custom\_docker\_setup) | any value excludes default docker installation and container build from userdata | `string` | `""` | no | +| [custom\_ssh\_populate](#input\_custom\_ssh\_populate) | any value excludes default ssh\_populate script used on container launch from userdata | `string` | `""` | no | +| [custom\_systemd](#input\_custom\_systemd) | any value excludes default systemd and hostname change from userdata | `string` | `""` | no | +| [delete\_network\_interface\_on\_termination](#input\_delete\_network\_interface\_on\_termination) | if network interface created for bastion host should be deleted when instance in terminated. Setting propagated to aws\_launch\_template.network\_interfaces.delete\_on\_termination | `bool` | `true` | no | +| [dns\_domain](#input\_dns\_domain) | The domain used for Route53 records | `string` | `""` | no | +| [environment\_name](#input\_environment\_name) | the name of the environment that we are deploying to, used in tagging. Overwritten if var.service\_name and var.bastion\_host\_name values are changed | `string` | `"staging"` | no | +| [extra\_user\_data\_content](#input\_extra\_user\_data\_content) | Extra user-data to add to the default built-in | `string` | `""` | no | +| [extra\_user\_data\_content\_type](#input\_extra\_user\_data\_content\_type) | What format is content in - eg 'text/cloud-config' or 'text/x-shellscript' | `string` | `"text/x-shellscript"` | no | +| [extra\_user\_data\_merge\_type](#input\_extra\_user\_data\_merge\_type) | Control how cloud-init merges user-data sections | `string` | `"str(append)"` | no | +| [lb\_healthcheck\_port](#input\_lb\_healthcheck\_port) | TCP port to conduct lb target group healthchecks. 
Acceptable values are 22 or 2222 | `string` | `"2222"` | no | +| [lb\_healthy\_threshold](#input\_lb\_healthy\_threshold) | Healthy threshold for lb target group | `string` | `"2"` | no | +| [lb\_interval](#input\_lb\_interval) | interval for lb target group health check | `string` | `"30"` | no | +| [lb\_is\_internal](#input\_lb\_is\_internal) | whether the lb will be internal | `string` | `false` | no | +| [lb\_unhealthy\_threshold](#input\_lb\_unhealthy\_threshold) | Unhealthy threshold for lb target group | `string` | `"2"` | no | +| [on\_demand\_base\_capacity](#input\_on\_demand\_base\_capacity) | allows a base level of on demand when using spot | `number` | `0` | no | +| [public\_ip](#input\_public\_ip) | Associate a public IP with the host instance when launching | `bool` | `false` | no | +| [route53\_fqdn](#input\_route53\_fqdn) | If creating a public DNS entry with this module then you may override the default constructed DNS entry by supplying a fully qualified domain name here which will be used verbatim | `string` | `""` | no | +| [route53\_zone\_id](#input\_route53\_zone\_id) | Route53 zoneId | `string` | `""` | no | +| [security\_groups\_additional](#input\_security\_groups\_additional) | additional security group IDs to attach to host instance | `list(string)` | `[]` | no | +| [service\_name](#input\_service\_name) | Unique name per vpc for associated resources- set to some non-default value for multiple deployments per vpc | `string` | `"bastion-service"` | no | +| [subnets\_asg](#input\_subnets\_asg) | list of subnets for autoscaling group - availability zones must match subnets\_lb | `list(string)` | `[]` | no | +| [subnets\_lb](#input\_subnets\_lb) | list of subnets for load balancer - availability zones must match subnets\_asg | `list(string)` | `[]` | no | +| [tags](#input\_tags) | AWS tags that should be associated with created resources | `map(string)` | `{}` | no | +| [vpc](#input\_vpc) | ID for Virtual Private Cloud to apply security policy and deploy stack to | `any` | n/a | yes | ## Outputs | Name | Description | |------|-------------| -| bastion\_service\_assume\_role\_name | role created for service host asg - if created with assume role | -| bastion\_service\_role\_name | role created for service host asg - if created without assume role | -| bastion\_sg\_id | Security Group id of the bastion host | -| lb\_arn | aws load balancer arn | -| lb\_dns\_name | aws load balancer dns | -| lb\_zone\_id | n/a | -| policy\_example\_for\_parent\_account\_empty\_if\_not\_used | You must apply an IAM policy with trust relationship identical or compatible with this in your other AWS account for IAM lookups to function there with STS:AssumeRole and allow users to login | -| service\_dns\_entry | dns-registered url for service and host | -| target\_group\_arn | aws load balancer target group arn | - +| [bastion\_service\_assume\_role\_name](#output\_bastion\_service\_assume\_role\_name) | role created for service host asg - if created with assume role | +| [bastion\_service\_role\_name](#output\_bastion\_service\_role\_name) | role created for service host asg - if created without assume role | +| [bastion\_sg\_id](#output\_bastion\_sg\_id) | Security Group id of the bastion host | +| [lb\_arn](#output\_lb\_arn) | aws load balancer arn | +| [lb\_dns\_name](#output\_lb\_dns\_name) | aws load balancer dns | +| [lb\_zone\_id](#output\_lb\_zone\_id) | n/a | +| 
[policy\_example\_for\_parent\_account\_empty\_if\_not\_used](#output\_policy\_example\_for\_parent\_account\_empty\_if\_not\_used) | You must apply an IAM policy with trust relationship identical or compatible with this in your other AWS account for IAM lookups to function there with STS:AssumeRole and allow users to login | +| [service\_dns\_entry](#output\_service\_dns\_entry) | dns-registered url for service and host | +| [target\_group\_arn](#output\_target\_group\_arn) | aws load balancer target group arn | diff --git a/changelog.md b/changelog.md index a376dd6..63a89e9 100644 --- a/changelog.md +++ b/changelog.md @@ -1,4 +1,35 @@ -**N.B.** +# 7.0 + +**Breaking changes with existing deployments using earlier module versions** + +- **Change:** Retire deprecated null-resource provider +- **Change:** Retire deprecated template provider (required for darwin_arm64). Fixes [Issue #51](https://github.com/joshuamkite/terraform-aws-ssh-bastion-service/issues/51) +- **Feature:** Support provider default tags as well as explicit tags for all supported resources plus autoscaling group +- **Change:** Update Terraform version to >= 0.15.x / 1.0.0 +- **Change/Feature:** Change from Launch Configuration to Launch Template. Includes support for spot instances. Fixes [Issue #46](https://github.com/joshuamkite/terraform-aws-ssh-bastion-service/issues/51) +- **Feature:** Enable setting cloudwatch metrics for autoscaling group +- **Feature:** Add unique target group name for bastion host elb +- **Feature:** Enable setting EBS size and name for bastion instance +- **Bugfix:** Update formatting in readme +- **Bugfix:** Spellcheck readme and changelog +- **Change:** Update terraform-docs outputs on documentation +- **Change:** Update internal filenames and references to comply with [upstream template file naming conventions](https://www.terraform.io/language/functions/templatefile); move locals to locals.tf + +# 6.1 + +**Change:** Update readme to specify that we are targeting Terraform 0.13 + +# 6.0 + +**Change:** Major version increment as changes accommodate major version increments for both AWS provider and Terraform itself + +**Change:** Terraform 0.13 using `terraform 0.13upgrade` linting and provider specification + +**Change:** Terraform AWS provider 3.0.0 - `availability_zones` argument now conflicts with `vpc_zone_identifier` in Resource: `aws_autoscaling_group` + +**Feature:** Single deployment of included example, accommodating above changes + +**Change:** `var.aws_profile` is now defaulted to `""` as it is only used for sample policies output # 5.1 @@ -75,11 +106,11 @@ The tags given in var.tags are rendered to the Autoscaling group as before # 4.3 -**Feature:** You can now specify a list of one or more security groups to attach to the host instance launch configuration. This can be supplied together with or instead of a whitelisted range of CIDR blocks. **N.B. This is _not_ aws_security_group_rule/source_security_group_id!** If you wish to append your own 'security_group_id' rules then you will need to attach these from a plan caling this module (using output "bastion_sg_id") or as part of a separate security group which you then attach. +**Feature:** You can now specify a list of one or more security groups to attach to the host instance launch configuration. This can be supplied together with or instead of a whitelisted range of CIDR blocks. **N.B. 
This is _not_ aws_security_group_rule/source_security_group_id!** If you wish to append your own 'security_group_id' rules then you will need to attach these from a plan calling this module (using output "bastion_sg_id") or as part of a separate security group which you then attach. It may be useful in an enterprise setting to have security groups with rules managed separately from the bastion plan but of course if you do not assign a suitable security group or whitelist then you may not be able to reach the service! -**Change:** The code has been DRYed significantly in locals.tf (to remove unused logic evaluations) and main.tf (to condense 2 seperate aws_launch_configuration and aws_autoscaling_group blocks into one each). This makes code maintenence much easier and less error prone **BUT** it does mean that these resources are now 'new' so if you are deploying over an older version of this plan then you can expect them to be recreated - as lifecycle 'create before destroy' is specified, deployment will be a bit longer but downtime should be brief. +**Change:** The code has been DRYed significantly in locals.tf (to remove unused logic evaluations) and main.tf (to condense 2 separate aws_launch_configuration and aws_autoscaling_group blocks into one each). This makes code maintenance much easier and less error prone **BUT** it does mean that these resources are now 'new' so if you are deploying over an older version of this plan then you can expect them to be recreated - as lifecycle 'create before destroy' is specified, deployment will be a bit longer but downtime should be brief. **Bugfix:** Previously the Golang code used for obtaining users and ssh public keys limited the number of users returned to 100 _if_ an IAM group was specified. This has now been increased to 1000 and the code change accepted upstream. @@ -112,11 +143,11 @@ If you exclude any section then you must replace it with equivalent functionalit **Feature:** Move from Classic Load Balancer to Network Load Balancer. * elb_idle_timeout and elb_timeout variables have been removed as they are not supported in this configuration. -* Configurable load balancer variables naming now prefixed 'lb'. Unfortunately the change in load balancer type breaks backward compatibilty with deployments using earlier versions of this module anyway so the opportunity is being taken to update the variable names for future sanity. +* Configurable load balancer variable names are now prefixed 'lb'. Unfortunately the change in load balancer type breaks backward compatibility with deployments using earlier versions of this module anyway so the opportunity is being taken to update the variable names for future sanity. **Feature:** Security group rules apply 'description' tag -**Change:** New code now in seperate files to assist readabilty. locals also moved to seperate file. +**Change:** New code now in separate files to assist readability. Locals also moved to a separate file. **Change:** Security group name for EC2 instance now name_prefix and simplified @@ -142,9 +173,9 @@ If you exclude any section then you must replace it with equivalent functionalit # 3.7 
Using the modified EC2 host sshd of port 2222 avoids this issue and is recommended for larger deployments. The host sshd is set to port 2222 as part of the service setup so this heathcheck is not entirely invalid. Security group rules are conditionally created to support any combination of access/healthceck on port 2222 or not. +**Feature:** ELB health check port may be optionally set to either port 22 (containerised service; default) or port 2222 (EC2 host sshd). If you are deploying a large number of bastion instances, all of them checking into the same parent account for IAM queries in response to load balancer health checks on port 22 causes IAM rate limiting from AWS. Using the modified EC2 host sshd of port 2222 avoids this issue and is recommended for larger deployments. The host sshd is set to port 2222 as part of the service setup so this healthcheck is not entirely invalid. Security group rules are conditionally created to support any combination of access/healthcheck on port 2222 or not. -**Feature:** Friendlier DNS and hostnaming. You can now define the last part of the hostname. By default this is the vpc ID via the magic default value of 'vpc_id' but you can pass a custom string, or an empty value to omit this. e.g. +**Feature:** Friendlier DNS and host naming. You can now define the last part of the hostname. By default this is the vpc ID via the magic default value of 'vpc_id' but you can pass a custom string, or an empty value to omit this. e.g. module default: `dev-ap-northeast-1-vpc-1a23b456d7890-bastion-service.yourdomain.com` @@ -167,11 +198,11 @@ If you exclude any section then you must replace it with equivalent functionalit **Feature:** New output: bastion_sg_id gives the Security Group id of the bastion host which may be useful for other services -**Documentation:** update readme to reflect new ouptputs and names; acknowledgements +**Documentation:** update readme to reflect new outputs and names; acknowledgements # 3.5 (broken, withdrawn) -**Bugfix:** Remove parentheses from the name of the sample policy ouptut to make it parsable when called from module +**Bugfix:** Remove parentheses from the name of the sample policy output to make it parsable when called from module # 3.4 (broken, withdrawn) @@ -189,15 +220,15 @@ If you exclude any section then you must replace it with equivalent functionalit # 3.1 -**Feature (backward compatible):** Improvements to example asssume role policy generation - making it easier to copy and paste from Terraform output to AWS web console +**Feature (backward compatible):** Improvements to example assume role policy generation - making it easier to copy and paste from Terraform output to AWS web console # 3.0 -With version 3 series (backward compatible with version 2) the ability to assume a role in another account has now been integrated with conditional logic. If you supply the ARN for a role for the bastion service to assume in another account ${var.assume_role_arn} then this plan will create an instance profile, role and policy along with each bastion to make use of it. A matching sample policy and trust relationship is given as an output from the plan to assist with application in the other account. If you do not supply this arn then this plan presumes IAM lookups in the same account and creates an appropriate instance profile, role and policies for each bastion in the same AWS account. 'Each bastion' here refers to a combination of environment, AWS account, AWS region and VPCID determined by deployment. 
Since this is a high availabilty service, it is not envisaged that there would be reason for more than one independent deployment within such a combination. +With version 3 series (backward compatible with version 2) the ability to assume a role in another account has now been integrated with conditional logic. If you supply the ARN for a role for the bastion service to assume in another account ${var.assume_role_arn} then this plan will create an instance profile, role and policy along with each bastion to make use of it. A matching sample policy and trust relationship is given as an output from the plan to assist with application in the other account. If you do not supply this arn then this plan presumes IAM lookups in the same account and creates an appropriate instance profile, role and policies for each bastion in the same AWS account. 'Each bastion' here refers to a combination of environment, AWS account, AWS region and VPCID determined by deployment. Since this is a high availability service, it is not envisaged that there would be reason for more than one independent deployment within such a combination. Also with version 3 the IAM policy generation and user data have been moved from modules back into the main plan. User data is no longer displayed. -If you are seeking a solution for ECS hosts then you are recommended to either the [Widdix project]((https://github.com/widdix/aws-ec2-ssh)) directly or my [Ansible-galaxy respin of it](https://galaxy.ansible.com/joshuamkite/aws-ecs-iam-users-tags/). This offers a range of features, suitable for a long-lived stateful host built. +If you are seeking a solution for ECS hosts then consider either the [Widdix project](https://github.com/widdix/aws-ec2-ssh) directly or my [Ansible Galaxy re-spin of it](https://galaxy.ansible.com/joshuamkite/aws-ecs-iam-users-tags/). These offer a range of features suitable for a long-lived stateful host. # 2.0 diff --git a/examples/full-with-public-ip/README.md b/examples/full-with-public-ip/README.md index 7f31a8b..f9449bd 100644 --- a/examples/full-with-public-ip/README.md +++ b/examples/full-with-public-ip/README.md @@ -4,18 +4,52 @@ This example shows a complete setup for a new `bastion` service with all needed * private subnet(s) inside the VPC, * an internet gateway and route tables. -To create the bastion service, subnets need to already exist -This is currently a limitation of Terraform: https://github.com/hashicorp/terraform/issues/12570 -Since Terraform version 0.12.0 you can either: -Comment out the bastion service, apply, uncomment and apply again (as for Terraform 0.11.x) -Or simply run the plan twice - first time will give an error like below, simply run again +## Requirements - Error: Provider produced inconsistent final plan +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13 | - When expanding the plan for - module.ssh-bastion-service.aws_autoscaling_group.bastion-service to include - new values learned so far during apply, provider "aws" produced an invalid new - value for .availability_zones: was known, but now unknown. +## Providers - This is a bug in the provider, which should be reported in the provider's own - issue tracker. 
+| Name | Version | +|------|---------| +| [aws](#provider\_aws) | 3.73.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [ssh-bastion-service](#module\_ssh-bastion-service) | joshuamkite/ssh-bastion-service/aws | n/a | + +## Resources + +| Name | Type | +|------|------| +| [aws_internet_gateway.bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | +| [aws_route.bastion-ipv4-out](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource | +| [aws_route_table.bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table_association.bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_subnet.bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_vpc.bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | Default AWS region | `string` | `"eu-west-1"` | no | +| [cidr-start](#input\_cidr-start) | Default CIDR block | `string` | `"10.50"` | no | +| [environment\_name](#input\_environment\_name) | n/a | `string` | `"demo"` | no | +| [everyone-cidr](#input\_everyone-cidr) | Everyone | `string` | `"0.0.0.0/0"` | no | +| [tags](#input\_tags) | tags applied to all resources | `map(string)` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [bastion\_service\_role\_name](#output\_bastion\_service\_role\_name) | role created for service host asg - if created without assume role | +| [bastion\_sg\_id](#output\_bastion\_sg\_id) | Security Group id of the bastion host | +| [lb\_arn](#output\_lb\_arn) | aws load balancer arn | +| [lb\_dns\_name](#output\_lb\_dns\_name) | aws load balancer dns | +| [lb\_zone\_id](#output\_lb\_zone\_id) | n/a | diff --git a/examples/full-with-public-ip/main.tf b/examples/full-with-public-ip/main.tf index d854bb2..755bed4 100644 --- a/examples/full-with-public-ip/main.tf +++ b/examples/full-with-public-ip/main.tf @@ -1,5 +1,9 @@ + provider "aws" { - region = var.aws-region + region = var.aws_region + default_tags { + tags = local.default_tags + } } data "aws_availability_zones" "available" { @@ -8,38 +12,21 @@ resource "aws_vpc" "bastion" { cidr_block = "${var.cidr-start}.0.0/16" enable_dns_hostnames = true - - tags = { - Name = "bastion-${var.environment-name}-vpc" - } } resource "aws_subnet" "bastion" { - count = 1 - + count = 1 availability_zone = data.aws_availability_zones.available.names[count.index] cidr_block = "${var.cidr-start}.${count.index}.0/24" vpc_id = aws_vpc.bastion.id - - tags = { - Name = "bastion-${var.environment-name}-subnet-${count.index}" - } } resource "aws_internet_gateway" "bastion" { vpc_id = aws_vpc.bastion.id - - tags = { - Name = "bastion-${var.environment-name}-ig" - } } resource "aws_route_table" "bastion" { vpc_id = aws_vpc.bastion.id - - tags = { - Name = "bastion-${var.environment-name}-rt" - } } resource "aws_route" "bastion-ipv4-out" { @@ -49,8 +36,7 @@ resource "aws_route"
"bastion-ipv4-out" { } resource "aws_route_table_association" "bastion" { - count = 1 - + count = 1 subnet_id = aws_subnet.bastion[count.index].id route_table_id = aws_route_table.bastion.id } @@ -63,8 +49,8 @@ variable "everyone-cidr" { module "ssh-bastion-service" { source = "joshuamkite/ssh-bastion-service/aws" # source = "../../" - aws_region = var.aws-region - environment_name = var.environment-name + aws_region = var.aws_region + environment_name = var.environment_name vpc = aws_vpc.bastion.id subnets_asg = flatten([aws_subnet.bastion.*.id]) subnets_lb = flatten([aws_subnet.bastion.*.id]) @@ -75,4 +61,5 @@ module "ssh-bastion-service" { aws_subnet.bastion, aws_internet_gateway.bastion, ] -} \ No newline at end of file + bastion_instance_types = ["t2.micro"] +} diff --git a/examples/full-with-public-ip/vars.tf b/examples/full-with-public-ip/vars.tf index 69b4c22..e9c4deb 100644 --- a/examples/full-with-public-ip/vars.tf +++ b/examples/full-with-public-ip/vars.tf @@ -1,4 +1,4 @@ -variable "aws-region" { +variable "aws_region" { default = "eu-west-1" description = "Default AWS region" } @@ -8,6 +8,18 @@ variable "cidr-start" { description = "Default CIDR block" } -variable "environment-name" { +variable "environment_name" { default = "demo" } + +variable "tags" { + type = map(string) + description = "tags aplied to all resources" + default = {} +} + +locals { + default_tags = { + Name = "bastion-service-${var.environment_name}" + } +} diff --git a/load_balancer.tf b/load_balancer.tf index aea4ed2..2a7479f 100644 --- a/load_balancer.tf +++ b/load_balancer.tf @@ -3,7 +3,7 @@ ####################################################### resource "aws_lb" "bastion-service" { - name = md5(format("${var.service_name}-%s", var.vpc)) + name = "${var.service_name}-${var.environment_name}" load_balancer_type = "network" internal = var.lb_is_internal subnets = var.subnets_lb @@ -46,7 +46,7 @@ resource "aws_lb_listener" "bastion-host" { # Target group service ####################################################### resource "aws_lb_target_group" "bastion-service" { - name = md5(format("${var.service_name}-%s", var.vpc)) + name = "${var.service_name}-${var.environment_name}-22" protocol = "TCP" port = 22 vpc_id = var.vpc @@ -67,7 +67,7 @@ resource "aws_lb_target_group" "bastion-service" { ####################################################### resource "aws_lb_target_group" "bastion-host" { count = local.hostport_whitelisted ? 1 : 0 - name = "bastion-host" + name = "${var.service_name}-${var.environment_name}-2222" protocol = "TCP" port = 2222 vpc_id = var.vpc @@ -82,4 +82,3 @@ resource "aws_lb_target_group" "bastion-host" { tags = var.tags } - diff --git a/locals.tf b/locals.tf index 70cef71..3e8da79 100644 --- a/locals.tf +++ b/locals.tf @@ -28,8 +28,9 @@ locals { # Logic tests for assume role vs same account ########################## locals { - assume_role_yes = var.assume_role_arn != "" ? 1 : 0 - assume_role_no = var.assume_role_arn == "" ? 1 : 0 + assume_role_yes = var.assume_role_arn != "" ? 1 : 0 + assume_role_no = var.assume_role_arn == "" ? 1 : 0 + assume_role_yes_bool = var.assume_role_arn != "" ? true : false } ########################## @@ -37,9 +38,9 @@ locals { ########################## locals { custom_ssh_populate_no = var.custom_ssh_populate == "" ? 1 : 0 - custom_authorized_keys_command_no = var.custom_authorized_keys_command == "" ? 1 : 0 - custom_docker_setup_no = var.custom_docker_setup == "" ? 1 : 0 - custom_systemd_no = var.custom_systemd == "" ? 
1 : 0 + custom_authorized_keys_command_no = var.custom_authorized_keys_command == "" ? true : false + custom_docker_setup_no = var.custom_docker_setup == "" ? true : false + custom_systemd_no = var.custom_systemd == "" ? true : false } ########################## @@ -66,3 +67,36 @@ locals { route53_name_components = "${local.bastion_host_name}-${var.service_name}.${var.dns_domain}" } + +############################ +# User Data Templates +############################ +locals { + systemd = templatefile("${path.module}/user_data/systemd.tftpl", { + bastion_host_name = local.bastion_host_name + vpc = var.vpc + }) + ssh_populate_assume_role = templatefile("${path.module}/user_data/ssh_populate_assume_role.tftpl", { + "assume_role_arn" = var.assume_role_arn + }) + ssh_populate_same_account = file("${path.module}/user_data/ssh_populate_same_account.tftpl") + docker_setup = templatefile("${path.module}/user_data/docker_setup.tftpl", { + "container_ubuntu_version" = var.container_ubuntu_version + }) + iam_authorized_keys_command = templatefile("${path.module}/user_data/iam-authorized-keys-command.tftpl", { + "authorized_command_code" = file("${path.module}/user_data/iam_authorized_keys_code/main.go") + "bastion_allowed_iam_group" = var.bastion_allowed_iam_group + }) +} + +#################################################### +# sample policy for parent account +################################################### +locals { + sample_policies_for_parent_account = templatefile("${path.module}/sts_assumerole_example/policy_example.tftpl", { + aws_profile = var.aws_profile + bastion_allowed_iam_group = var.bastion_allowed_iam_group + assume_role_arn = var.assume_role_arn + } + ) +} diff --git a/main.tf b/main.tf index 3a91054..536207e 100755 --- a/main.tf +++ b/main.tf @@ -17,65 +17,107 @@ data "aws_ami" "debian" { owners = ["379101102735"] # Debian } + ############################ -#Launch configuration for service host +#Launch template for service host ############################ -resource "aws_launch_configuration" "bastion-service-host" { - name_prefix = "${var.service_name}-host" +resource "aws_launch_template" "bastion-service-host" { + name_prefix = "${var.service_name}-host-${var.environment_name}" image_id = local.bastion_ami_id - instance_type = var.bastion_instance_type - iam_instance_profile = element( - concat( - aws_iam_instance_profile.bastion_service_assume_role_profile.*.arn, - aws_iam_instance_profile.bastion_service_profile.*.arn, - ), - 0, - ) - associate_public_ip_address = var.public_ip - security_groups = concat( - [aws_security_group.bastion_service.id], - var.security_groups_additional - ) - user_data = data.template_cloudinit_config.config.rendered - key_name = var.bastion_service_host_key_name + instance_type = var.bastion_instance_types[0] + key_name = var.bastion_service_host_key_name + user_data = base64encode(data.cloudinit_config.config.rendered) + + iam_instance_profile { + name = element( + concat( + aws_iam_instance_profile.bastion_service_assume_role_profile.*.name, + aws_iam_instance_profile.bastion_service_profile.*.name, + ), + 0, + ) + } + + network_interfaces { + associate_public_ip_address = var.public_ip + delete_on_termination = var.delete_network_interface_on_termination + security_groups = concat( + [aws_security_group.bastion_service.id], + var.security_groups_additional + ) + } + + block_device_mappings { + device_name = var.bastion_ebs_device_name + + ebs { + volume_size = var.bastion_ebs_size + volume_type = "gp2" + delete_on_termination = "true" + } + } 
lifecycle { create_before_destroy = true } + tags = var.tags } ####################################################### # ASG section ####################################################### -data "null_data_source" "asg-tags" { - count = length(keys(var.tags)) +data "aws_default_tags" "this" {} - inputs = { - key = element(keys(var.tags), count.index) - value = element(values(var.tags), count.index) - propagate_at_launch = true - } -} resource "aws_autoscaling_group" "bastion-service" { - name_prefix = "${var.service_name}-asg" - max_size = var.asg_max - min_size = var.asg_min - desired_capacity = var.asg_desired - launch_configuration = aws_launch_configuration.bastion-service-host.name - vpc_zone_identifier = var.subnets_asg + name_prefix = "${var.service_name}-asg" + max_size = var.asg_max + min_size = var.asg_min + desired_capacity = var.asg_desired + vpc_zone_identifier = var.subnets_asg + + mixed_instances_policy { + instances_distribution { + on_demand_base_capacity = var.on_demand_base_capacity + on_demand_percentage_above_base_capacity = 0 + } + + launch_template { + launch_template_specification { + launch_template_id = aws_launch_template.bastion-service-host.id + version = "$Latest" + } + + dynamic "override" { + for_each = var.bastion_instance_types + content { + instance_type = override.value + } + } + } + } + target_group_arns = concat( [aws_lb_target_group.bastion-service.arn], aws_lb_target_group.bastion-host.*.arn ) + enabled_metrics = var.autoscaling_group_enabled_metrics lifecycle { create_before_destroy = true } - tags = data.null_data_source.asg-tags.*.outputs + + dynamic "tag" { + for_each = merge(data.aws_default_tags.this.tags, var.tags) + content { + key = tag.key + value = tag.value + propagate_at_launch = true + } + } } #################################################### @@ -94,19 +136,3 @@ resource "aws_route53_record" "bastion_service" { evaluate_target_health = true } } - -#################################################### -# sample policy for parent account -################################################### - -data "template_file" "sample_policies_for_parent_account" { - count = local.assume_role_yes - template = file("${path.module}/sts_assumerole_example/policy_example.tpl") - - vars = { - aws_profile = var.aws_profile - bastion_allowed_iam_group = var.bastion_allowed_iam_group - assume_role_arn = var.assume_role_arn - } -} - diff --git a/outputs.tf b/outputs.tf index 25681c3..6ab7549 100644 --- a/outputs.tf +++ b/outputs.tf @@ -5,10 +5,9 @@ output "service_dns_entry" { output "policy_example_for_parent_account_empty_if_not_used" { description = "You must apply an IAM policy with trust relationship identical or compatible with this in your other AWS account for IAM lookups to function there with STS:AssumeRole and allow users to login" - value = join( - "", - data.template_file.sample_policies_for_parent_account.*.rendered, - ) + value = [ + local.assume_role_yes_bool ? local.sample_policies_for_parent_account : "" + ] } output "bastion_sg_id" { diff --git a/run_from_desktop.md b/run_from_desktop.md deleted file mode 100644 index 9d19eb7..0000000 --- a/run_from_desktop.md +++ /dev/null @@ -1,40 +0,0 @@ -## To Run (from desktop): - - If you are running this as a standalone plan then **You must _thoroughly_ reinitialise the terraform state before running the plan again in a different region of the same AWS account** Failure to do this will result in terraform destroying the IAM policies for the previous host. 
- -* Set aws-profile for first region -* Initialise backend (for remote state) - - - terraform init -backend-config=config/?/config.remote - - -* Apply terraform plan - - - terraform apply -var-file=config/?/config.tfvars - - -* next region (see note below) - - rm -rf .terraform - - -* Set aws-profile for next region -* init backend for next region - - - terraform init -backend -backend-config=config/?/config.remote - - -* run plan - - - terraform apply -var-file=config/?/config.tfvars - -**Note** -During terraform init there can be the question: -Do you want to copy existing state to the new backend? -Just say "no" -It is an issue when switching from different backend inside the same directory -As alternative before you run terraform init you can run "rm -rf .terraform" then this question will not popup \ No newline at end of file diff --git a/sts_assumerole_example/policy_example.tpl b/sts_assumerole_example/policy_example.tftpl similarity index 100% rename from sts_assumerole_example/policy_example.tpl rename to sts_assumerole_example/policy_example.tftpl diff --git a/user_data.tf b/user_data.tf index 629938b..593e9d5 100644 --- a/user_data.tf +++ b/user_data.tf @@ -1,53 +1,7 @@ ############################ -# Templates section +# User Data Templates combined ############################ -data "template_file" "systemd" { - template = file("${path.module}/user_data/systemd.tpl") - count = local.custom_systemd_no - - vars = { - bastion_host_name = local.bastion_host_name - vpc = var.vpc - } -} - -data "template_file" "ssh_populate_assume_role" { - count = local.assume_role_yes * local.custom_ssh_populate_no - template = file("${path.module}/user_data/ssh_populate_assume_role.tpl") - - vars = { - assume_role_arn = var.assume_role_arn - } -} - -data "template_file" "ssh_populate_same_account" { - count = local.assume_role_no * local.custom_ssh_populate_no - template = file("${path.module}/user_data/ssh_populate_same_account.tpl") -} - -data "template_file" "docker_setup" { - count = local.custom_docker_setup_no - template = file("${path.module}/user_data/docker_setup.tpl") - - vars = { - container_ubuntu_version = var.container_ubuntu_version - } -} - -data "template_file" "iam-authorized-keys-command" { - count = local.custom_authorized_keys_command_no - template = file("${path.module}/user_data/iam-authorized-keys-command.tpl") - - vars = { - authorized_command_code = file("${path.module}/user_data/iam_authorized_keys_code/main.go") - bastion_allowed_iam_group = var.bastion_allowed_iam_group - } -} - -############################ -# Templates combined section -############################ -data "template_cloudinit_config" "config" { +data "cloudinit_config" "config" { gzip = false base64_encode = false @@ -55,10 +9,7 @@ data "template_cloudinit_config" "config" { part { filename = "module_systemd" content_type = "text/x-shellscript" - content = element( - concat(data.template_file.systemd.*.rendered, ["#!/bin/bash"]), - 0, - ) + content = local.custom_systemd_no ? local.systemd : "#!/bin/bash" } # ssh_populate_assume_role @@ -66,13 +17,7 @@ data "template_cloudinit_config" "config" { filename = "module_ssh_populate_assume_role" content_type = "text/x-shellscript" merge_type = "str(append)" - content = element( - concat( - data.template_file.ssh_populate_assume_role.*.rendered, - ["#!/bin/bash"], - ), - 0, - ) + content = local.assume_role_yes * local.custom_ssh_populate_no != 0 ? 
local.ssh_populate_assume_role : "#!/bin/bash" } # ssh_populate_same_account @@ -80,13 +25,7 @@ part { filename = "module_ssh_populate_same_account" content_type = "text/x-shellscript" merge_type = "str(append)" - content = element( - concat( - data.template_file.ssh_populate_same_account.*.rendered, - ["#!/bin/bash"], - ), - 0, - ) + content = local.assume_role_no * local.custom_ssh_populate_no != 0 ? local.ssh_populate_same_account : "#!/bin/bash" } # docker_setup section @@ -94,10 +33,7 @@ part { filename = "module_docker_setup" content_type = "text/x-shellscript" merge_type = "str(append)" - content = element( - concat(data.template_file.docker_setup.*.rendered, ["#!/bin/bash"]), - 0, - ) + content = local.custom_docker_setup_no ? local.docker_setup : "#!/bin/bash" } # iam-authorized-keys-command @@ -105,13 +41,7 @@ part { filename = "module_iam-authorized-keys-command" content_type = "text/x-shellscript" merge_type = "str(append)" - content = element( - concat( - data.template_file.iam-authorized-keys-command.*.rendered, - ["#!/bin/bash"], - ), - 0, - ) + content = local.custom_authorized_keys_command_no ? local.iam_authorized_keys_command : "#!/bin/bash" } part { diff --git a/user_data/docker_setup.tpl b/user_data/docker_setup.tftpl similarity index 100% rename from user_data/docker_setup.tpl rename to user_data/docker_setup.tftpl diff --git a/user_data/iam-authorized-keys-command.tpl b/user_data/iam-authorized-keys-command.tftpl similarity index 100% rename from user_data/iam-authorized-keys-command.tpl rename to user_data/iam-authorized-keys-command.tftpl diff --git a/user_data/ssh_populate_assume_role.tpl b/user_data/ssh_populate_assume_role.tftpl similarity index 100% rename from user_data/ssh_populate_assume_role.tpl rename to user_data/ssh_populate_assume_role.tftpl diff --git a/user_data/ssh_populate_same_account.tpl b/user_data/ssh_populate_same_account.tftpl similarity index 100% rename from user_data/ssh_populate_same_account.tpl rename to user_data/ssh_populate_same_account.tftpl diff --git a/user_data/systemd.tpl b/user_data/systemd.tftpl similarity index 100% rename from user_data/systemd.tpl rename to user_data/systemd.tftpl diff --git a/variables.tf b/variables.tf index 80de050..7dedcd4 100755 --- a/variables.tf +++ b/variables.tf @@ -1,6 +1,6 @@ -variable "bastion_instance_type" { - description = "The virtual hardware to be used for the bastion service host" - default = "t2.micro" +variable "bastion_instance_types" { + description = "List of ec2 types for the bastion host, used by aws_launch_template (first from the list) and in aws_autoscaling_group" + default = ["t2.small", "t2.medium", "t2.large"] } variable "cidr_blocks_whitelist_host" { @@ -123,7 +123,7 @@ variable "aws_region" { } variable "aws_profile" { - default = "" + default = "" } variable "assume_role_arn" { @@ -203,3 +203,28 @@ variable "route53_fqdn" { default = "" } +variable "on_demand_base_capacity" { + default = 0 + description = "allows a base level of on demand when using spot" +} + +variable "delete_network_interface_on_termination" { + description = "Whether the network interface created for the bastion host should be deleted when the instance is terminated.
Setting propagated to aws_launch_template.network_interfaces.delete_on_termination" + default = true +} + +variable "bastion_ebs_size" { + description = "Size in GiB of the EBS volume attached to the bastion instance" + default = 8 +} + +variable "bastion_ebs_device_name" { + description = "Name of bastion instance block device" + default = "xvda" +} + +variable "autoscaling_group_enabled_metrics" { + type = list(string) + description = "A list of CloudWatch metrics to collect on the autoscaling group. Permitted values include: GroupMinSize; GroupMaxSize; GroupDesiredCapacity; GroupInServiceInstances; GroupPendingInstances; GroupStandbyInstances; GroupTerminatingInstances; GroupTotalInstances" + default = [] +} diff --git a/versions.tf b/versions.tf index ec7ff25..f5d2b4b 100644 --- a/versions.tf +++ b/versions.tf @@ -1,15 +1,9 @@ terraform { - required_version = ">= 0.13" + required_version = ">= 0.15" required_providers { aws = { source = "hashicorp/aws" } - null = { source = "hashicorp/null" } - template = { source = "hashicorp/template" } } }
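The variable changes above alter the module's calling convention, most notably the move from a single `bastion_instance_type` string to the `bastion_instance_types` list. The sketch below shows a minimal post-upgrade module call. It is illustrative only: the region, subnet references and chosen values are placeholders, while the input names are taken from the diffs above.

```hcl
# Illustrative sketch only: values are placeholders; input names come from
# variables.tf and the examples/full-with-public-ip diff above.
module "ssh-bastion-service" {
  source = "joshuamkite/ssh-bastion-service/aws"

  aws_region       = "eu-west-1"
  environment_name = "demo"
  vpc              = aws_vpc.bastion.id
  subnets_asg      = flatten([aws_subnet.bastion.*.id])
  subnets_lb       = flatten([aws_subnet.bastion.*.id])

  # Replaces the old bastion_instance_type string: the first entry is used
  # in the launch template, the whole list becomes mixed-instances overrides
  # in the autoscaling group.
  bastion_instance_types = ["t2.small", "t2.medium"]

  # Capacity above this base is spot, since the module sets
  # on_demand_percentage_above_base_capacity = 0.
  on_demand_base_capacity = 1

  # Optional CloudWatch metrics collection on the autoscaling group.
  autoscaling_group_enabled_metrics = ["GroupInServiceInstances", "GroupTotalInstances"]
}
```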
"t2.small",
"t2.medium",
"t2.large"
]
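For reference, the user-data rework in this diff follows one repeated pattern: each counted `template_file` data source from the now-removed `hashicorp/template` provider is replaced by the built-in `templatefile()` function over a renamed `.tftpl` template, and the `element(concat(...))` fallback becomes a simple ternary. A minimal sketch of that pattern, using the docker_setup section as the example; note the helper local `docker_setup_part` is hypothetical, since in the module the ternary sits inline in the `cloudinit_config` part:

```hcl
# Before (removed in this diff): an external-provider data source, with
# count acting as an on/off switch.
#
#   data "template_file" "docker_setup" {
#     count    = local.custom_docker_setup_no
#     template = file("${path.module}/user_data/docker_setup.tpl")
#     vars     = { container_ubuntu_version = var.container_ubuntu_version }
#   }

# After: templatefile() renders the .tftpl template into a local with no
# provider dependency, and a boolean ternary selects either the rendered
# template or a no-op stub.
locals {
  docker_setup = templatefile("${path.module}/user_data/docker_setup.tftpl", {
    container_ubuntu_version = var.container_ubuntu_version
  })

  # Hypothetical helper shown for clarity; the module uses this expression
  # directly as the cloud-init part content.
  docker_setup_part = local.custom_docker_setup_no ? local.docker_setup : "#!/bin/bash"
}
```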