diff --git a/Dockerfile b/Dockerfile
index d10e714..05e2b80 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,9 @@
 FROM ubuntu/apache2
 
 LABEL authors="barber"
 
-RUN apt update && apt install -y libapache2-mod-auth-openidc ca-certificates && a2enmod auth_openidc proxy proxy_http proxy_wstunnel rewrite headers && \
+RUN apt update && apt install -y libapache2-mod-auth-openidc ca-certificates python3-boto3 && a2enmod auth_openidc proxy proxy_http proxy_html proxy_wstunnel substitute rewrite headers && \
     sed -i 's/Listen 80/Listen 8080/' /etc/apache2/ports.conf
 
-COPY write_site.sh /usr/local/bin/
+COPY write_site.py /usr/local/bin/
-CMD ["/bin/bash", "-c", "/usr/local/bin/write_site.sh && apache2-foreground"]
+CMD ["/bin/bash", "-c", "python3 /usr/local/bin/write_site.py && apache2-foreground"]
diff --git a/README.md b/README.md
index 942da64..c859ecc 100644
--- a/README.md
+++ b/README.md
@@ -6,61 +6,99 @@ To enable/disable modules, update and change the HTTPD installation then there i
 The webservers default port is also 8080 to let it traverse the MCP NACL.
 
-When deployed this terraform code creates an ECS cluster, with an EFS backend that then allows us to store apache configs
-in a filesystem that wont vanish when it restarts. As such the EFS filesystem also needs a way to create new files, this is
-done via a lambda function that writes valid apache config files to the EFS mount.
+When deployed, this terraform code creates an ECS cluster along with a baseline set of SSM parameters that other services can extend with their own Apache HTTPD configurations. The container pulls these configurations down and collates them on restart, so a configuration change is applied by triggering a lambda function that restarts the httpd task.
 
-A sample trigger:
+A sample configuration snippet and trigger:
 
 ```
-variable "template" {
-  default = <<EOT
+resource "aws_ssm_parameter" "managementproxy_config" {
+  name  = "/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/010-management"
+  type  = "String"
+  value = <<-EOT
+<Location "/management/">
+    ProxyPass http://${var.mgmt_dns}/
+    ProxyPassReverse http://${var.mgmt_dns}/
+    ProxyPreserveHost On
+    FallbackResource /management/index.html
+</Location>
 EOT
 }
 
 resource "aws_lambda_invocation" "demoinvocation2" {
-  function_name = "ZwUycV-unity-proxy-httpdproxymanagement"
-
-  input = jsonencode({
-    filename = "example_filename1",
-    template = var.template
-  })
-
+  function_name = "${var.project}-${var.venue}-httpdproxymanagement"
 }
 ```
 
-The config files are written as flat configs. They are then used inside a main apache2 config like this:
+
+The configuration is collated from SSM parameters residing under `/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/`, and assembled like so:
 
 ```
-<VirtualHost *:8080>
-  Include /etc/apache2/sites-enabled/mgmt.conf
-  ### ADD MORE HOSTS BELOW THIS LINE
-</VirtualHost>
+<VirtualHost *:8080>
+RewriteEngine on
+RewriteCond %{HTTP:Upgrade} websocket [NC]
+RewriteCond %{HTTP:Connection} upgrade [NC]
+RewriteRule /management/(.*) ws://internal-unity-mc-alb-hzs9j-1269535099.us-west-2.elb.amazonaws.com:8080/$1 [P,L]
+<Location "/management/">
+    ProxyPass http://internal-unity-mc-alb-hzs9j-1269535099.us-west-2.elb.amazonaws.com:8080/
+    ProxyPassReverse http://internal-unity-mc-alb-hzs9j-1269535099.us-west-2.elb.amazonaws.com:8080/
+    ProxyPreserveHost On
+    FallbackResource /management/index.html
+</Location>
+</VirtualHost>
 ```
 
-They will be added as additional config files below the comment line. The httpd task is then restarted to allow the
-config to then take effect.
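+
+The reload lambda can also be invoked outside of Terraform. The following is a minimal sketch (not part of this module) that assumes default AWS credentials/region and reads the lambda name from the `/unity/<project>/<venue>/cs/management/proxy/lambda-name` parameter written by this module:
+
+```python
+import boto3
+
+project, venue = "unity", "dev"  # hypothetical values for illustration
+
+ssm = boto3.client("ssm")
+awslambda = boto3.client("lambda")
+
+# Look up the reload lambda created by this module.
+lambda_name = ssm.get_parameter(
+    Name=f"/unity/{project}/{venue}/cs/management/proxy/lambda-name"
+)["Parameter"]["Value"]
+
+# Invoking it stops the running httpd task; the ECS service then starts a
+# replacement task, which collates the SSM configuration snippets on boot.
+awslambda.invoke(FunctionName=lambda_name)
+```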
+Live checking of the "current" configuration may be accomplished with `write_site.py` in a local environment: +``` +% DEBUG=yes UNITY_PROJECT=btlunsfo UNITY_VENUE=dev11 python write_site.py + -There is currently no way to remove files or fix a broken config other than mounting the EFS mount into an EC2 server and making changes. -To do this you will need to edit the security group to allow access to the EC2 box and then install the EFS utils. +RewriteEngine on +RewriteCond %{HTTP:Upgrade} websocket [NC] +RewriteCond %{HTTP:Connection} upgrade [NC] +RewriteRule /management/(.*) ws://internal-unity-mc-alb-hzs9j-1269535099.us-west-2.elb.amazonaws.com:8080/$1 [P,L] + + ProxyPass http://internal-unity-mc-alb-hzs9j-1269535099.us-west-2.elb.amazonaws.com:8080/ + ProxyPassReverse http://internal-unity-mc-alb-hzs9j-1269535099.us-west-2.elb.amazonaws.com:8080/ + ProxyPreserveHost On + FallbackResource /management/index.html + -## Manually adding a file/template + -One can execute the httpdmanager lambda function directly with the following json syntax: +``` +This repository configures only one virtualhost (both open and close directives), but others may be added. This can be accomplished by simply adding more SSM parameters: ``` -{ - "filename": "example-extension", - "template": "SSLProxyEngine On\nProxyPreserveHost On\n\nProxyPass \/hub https:\/\/jupyter.us-west-2.elb.amazonaws.com:443\/hub\/\nProxyPassReverse \/hub https:\/\/jupyter.us-west-2.elb.amazonaws.com:443\/hub\/" +resource "aws_ssm_parameter" "managementproxy_openvirtualhost" { + name = "/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/001-openvhost8080" + type = "String" + value = <<-EOT + +EOT } +resource "aws_ssm_parameter" "managementproxy_closevirtualhost" { + depends_on = [aws_ssm_parameter.managementproxy_openvirtualhost] + name = "/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/100-closevhost8080" + type = "String" + value = <<-EOT + +EOT +} ``` +NOTE the names of each of these SSM parameters: + - 001-openvhost8080 + - 010-management + - 100-closevhost8080 -The template must be json encoded. I've used https://nddapp.com/json-encoder.html successfully. - +For additional virtualhosts, please pick an ordinal number range that is *greater* than 100 (e.g. 101-openTestHost, 120-closeTestHost). ## How do I know what to add in the 'template' file above? We are not perfect human beings. In order to iterate quickly on the above templat contents, we have created a development proxy environment that can be tested mostly locally. Check out the `develop` directory for instructions. 
diff --git a/lambda/lambda.py b/lambda/lambda.py
index d0df6f9..d2eba0d 100644
--- a/lambda/lambda.py
+++ b/lambda/lambda.py
@@ -1,55 +1,23 @@
-import boto3
 import os
 
-def insert_new_host_line(file_path, new_line):
-    # Marker to find the position where the new line will be inserted
-    marker = "### ADD MORE HOSTS BELOW THIS LINE"
-
-    # Read the original file content
-    with open(file_path, 'r') as file:
-        lines = file.readlines()
-
-    # Find the marker and insert the new line after it
-    for i, line in enumerate(lines):
-        if marker in line:
-            # Insert new line after the marker line
-            lines.insert(i + 1, new_line + "\n")
-            break  # Exit the loop once the marker is found and the line is inserted
-
-    # Write the modified content back to the file
-    with open(file_path, 'w') as file:
-        file.writelines(lines)
+import boto3
 
 
 def lambda_handler(event, context):
-    filename = event.get('filename')
-    text_blob = event.get('template')
-
-    efs_mount_path = '/mnt/efs'
-    file_path = os.path.join(efs_mount_path, filename+".conf")
-    with open(file_path, 'w') as file:
-        file.write(text_blob)
-
-
-    # Update main file
-    file_path = "/mnt/efs/main.conf"
-    new_line = "  Include /etc/apache2/sites-enabled/"+filename+".conf"
-    insert_new_host_line(file_path, new_line)
-
     # Restart an ECS task
-    ecs_client = boto3.client('ecs')
-    service_name = os.environ.get('SERVICE_NAME')
-    cluster_name = os.environ.get('CLUSTER_NAME')
+    ecs_client = boto3.client("ecs")
+    service_name = os.environ.get("SERVICE_NAME")
+    cluster_name = os.environ.get("CLUSTER_NAME")
 
     # List the tasks for a given cluster and service
     tasks_response = ecs_client.list_tasks(
         cluster=cluster_name,
         serviceName=service_name,  # Use this if tasks are part of a service
-        desiredStatus='RUNNING'  # Optional: Adjust this based on the task status you're interested in
+        desiredStatus="RUNNING",  # Optional: Adjust this based on the task status you're interested in
     )
 
-    task_arns = tasks_response.get('taskArns')
+    task_arns = tasks_response.get("taskArns")
     if task_arns:
         # Assuming you want to restart the first task in the list
         task_id = task_arns[0]
@@ -57,11 +25,10 @@ def lambda_handler(event, context):
         # Stop the task (it should be restarted automatically if it's part of a service)
         ecs_client.stop_task(cluster=cluster_name, task=task_id)
 
-        return_message = 'File written and ECS task restarted'
+        return_message = "ECS task restarted"
     else:
-        return_message = 'No running tasks found for the specified service in the cluster'
+        return_message = (
+            "No running tasks found for the specified service in the cluster"
+        )
 
-    return {
-        'statusCode': 200,
-        'body': return_message
-    }
\ No newline at end of file
+    return {"statusCode": 200, "body": return_message}
diff --git a/terraform-unity/ecs.tf b/terraform-unity/ecs.tf
index 8d4bbe0..08774aa 100644
--- a/terraform-unity/ecs.tf
+++ b/terraform-unity/ecs.tf
@@ -1,5 +1,5 @@
 resource "aws_ecs_cluster" "httpd_cluster" {
-  name = "${var.deployment_name}-httpd-cluster"
+  name = "${var.project}-${var.venue}-httpd-cluster"
   tags = {
     Service = "U-CS"
   }
@@ -9,34 +9,8 @@ data "aws_iam_policy" "mcp_operator_policy" {
   name = "mcp-tenantOperator-AMI-APIG"
 }
 
-resource "aws_iam_policy" "efs_access" {
-  name        = "${var.deployment_name}-EFSAccessPolicy"
-  description = "Policy for ECS tasks to access EFS"
-
-  policy = jsonencode({
-    Version = "2012-10-17",
-    Statement = [
-      {
-        Effect = "Allow",
-        Action = [
-          "elasticfilesystem:ClientMount",
-          "elasticfilesystem:ClientWrite",
-          "elasticfilesystem:ClientRootAccess",
"elasticfilesystem:DescribeMountTargets", - ], - Resource = "*", - }, - ], - }) -} - -resource "aws_iam_role_policy_attachment" "efs_access_attachment" { - role = aws_iam_role.ecs_task_role.name - policy_arn = aws_iam_policy.efs_access.arn -} - resource "aws_iam_role" "ecs_task_role" { - name = "${var.deployment_name}-ecs_task_role" + name = "${var.project}-${var.venue}-ecs_task_role" assume_role_policy = jsonencode({ Version = "2012-10-17", @@ -54,8 +28,13 @@ resource "aws_iam_role" "ecs_task_role" { } +resource "aws_iam_role_policy_attachment" "ecs_ssm_role_policy" { + role = aws_iam_role.ecs_task_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess" +} + resource "aws_iam_role" "ecs_execution_role" { - name = "${var.deployment_name}ecs_execution_role" + name = "${var.project}-${var.venue}ecs_execution_role" assume_role_policy = jsonencode({ Version = "2012-10-17", @@ -78,9 +57,8 @@ resource "aws_iam_role_policy_attachment" "ecs_execution_role_policy" { policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" } - resource "aws_cloudwatch_log_group" "proxyloggroup" { - name = "/ecs/${var.deployment_name}-managementproxy" + name = "/ecs/${var.project}-${var.venue}-managementproxy" } resource "aws_ecs_task_definition" "httpd" { @@ -91,28 +69,19 @@ resource "aws_ecs_task_definition" "httpd" { execution_role_arn = aws_iam_role.ecs_execution_role.arn memory = "512" cpu = "256" - volume { - name = "httpd-config" - - efs_volume_configuration { - file_system_id = aws_efs_file_system.httpd_config_efs.id - transit_encryption = "ENABLED" - transit_encryption_port = 2049 - authorization_config { - access_point_id = aws_efs_access_point.httpd_config_ap.id - iam = "ENABLED" - } - } - } container_definitions = jsonencode([{ name = "httpd" - image = "ghcr.io/unity-sds/unity-proxy/httpd-proxy:0.13.0" + image = "ghcr.io/unity-sds/unity-proxy/httpd-proxy:${var.httpd_proxy_version}" environment = [ { - name = "ELB_DNS_NAME", - value = var.mgmt_dns + name = "UNITY_PROJECT", + value = var.project + }, + { + name = "UNITY_VENUE", + value = var.venue } ] logConfiguration = { @@ -129,12 +98,6 @@ resource "aws_ecs_task_definition" "httpd" { hostPort = 8080 } ] - mountPoints = [ - { - containerPath = "/etc/apache2/sites-enabled/" - sourceVolume = "httpd-config" - } - ] }]) tags = { Service = "U-CS" @@ -142,7 +105,7 @@ resource "aws_ecs_task_definition" "httpd" { } resource "aws_security_group" "ecs_sg" { - name = "${var.deployment_name}-ecs_service_sg" + name = "${var.project}-${var.venue}-ecs_service_sg" description = "Security group for ECS service" vpc_id = data.aws_ssm_parameter.vpc_id.value @@ -194,5 +157,6 @@ resource "aws_ecs_service" "httpd_service" { } depends_on = [ aws_lb_listener.httpd_listener, + aws_ssm_parameter.managementproxy_config ] } diff --git a/terraform-unity/efs.tf b/terraform-unity/efs.tf deleted file mode 100644 index c32259d..0000000 --- a/terraform-unity/efs.tf +++ /dev/null @@ -1,62 +0,0 @@ -resource "aws_efs_file_system" "httpd_config_efs" { - creation_token = "${var.deployment_name}-httpd-config" - tags = { - Service = "U-CS" - Name = "unity-proxy" - } -} -resource "aws_security_group" "efs_sg" { - name = "${var.deployment_name}-efs-security-group" - description = "Security group for EFS" - vpc_id = data.aws_ssm_parameter.vpc_id.value - - # Ingress rule to allow NFS - ingress { - from_port = 2049 - to_port = 2049 - protocol = "tcp" - security_groups = [aws_security_group.ecs_sg.id, aws_security_group.lambda_sg.id] - } - - # Egress rule 
-  egress {
-    from_port   = 0
-    to_port     = 0
-    protocol    = "-1"
-    cidr_blocks = ["0.0.0.0/0"]
-  }
-
-  tags = {
-    Service = "U-CS"
-  }
-}
-resource "aws_efs_mount_target" "efs_mount_target" {
-  for_each        = toset(local.subnet_ids)
-  file_system_id  = aws_efs_file_system.httpd_config_efs.id
-  subnet_id       = each.value
-  security_groups = [aws_security_group.efs_sg.id]
-}
-
-
-resource "aws_efs_access_point" "httpd_config_ap" {
-  file_system_id = aws_efs_file_system.httpd_config_efs.id
-
-  posix_user {
-    gid = 1000
-    uid = 1000
-  }
-
-  root_directory {
-    path = "/efs"
-    creation_info {
-      owner_gid   = 1000
-      owner_uid   = 1000
-      permissions = "0777"
-    }
-  }
-
-  tags = {
-    Name    = "${var.deployment_name}-httpd-config-ap"
-    Service = "U-CS"
-  }
-}
\ No newline at end of file
diff --git a/terraform-unity/lambda.tf b/terraform-unity/lambda.tf
index 0565bf7..e38cde6 100644
--- a/terraform-unity/lambda.tf
+++ b/terraform-unity/lambda.tf
@@ -1,11 +1,11 @@
 resource "aws_lambda_function" "httpdlambda" {
-  function_name = "${var.deployment_name}-httpdproxymanagement"
+  function_name = "${var.project}-${var.venue}-httpdproxymanagement"
 
-  filename      = "${path.module}/lambda.zip"
-  handler       = "lambda.lambda_handler"
-  runtime       = "python3.8"
+  filename = "${path.module}/lambda.zip"
+  handler  = "lambda.lambda_handler"
+  runtime  = "python3.8"
 
-  role          = aws_iam_role.lambda_iam_role.arn
+  role = aws_iam_role.lambda_iam_role.arn
 
   environment {
     variables = {
@@ -14,12 +14,6 @@ resource "aws_lambda_function" "httpdlambda" {
     }
   }
 
-  # EFS configuration
-  file_system_config {
-    arn              = aws_efs_access_point.httpd_config_ap.arn
-    local_mount_path = "/mnt/efs" # Lambda will access the EFS at this mount path
-  }
-
   vpc_config {
     subnet_ids         = local.subnet_ids
     security_group_ids = [aws_security_group.lambda_sg.id]
@@ -29,7 +23,7 @@ resource "aws_lambda_function" "httpdlambda" {
   }
 }
 
 resource "aws_security_group" "lambda_sg" {
-  name        = "${var.deployment_name}-httpd_lambda_sg"
+  name        = "${var.project}-${var.venue}-httpd_lambda_sg"
   description = "Security group for httpd lambda service"
   vpc_id      = data.aws_ssm_parameter.vpc_id.value
@@ -58,7 +52,7 @@ resource "aws_security_group" "lambda_sg" {
 
 
 resource "aws_iam_role" "lambda_iam_role" {
-  name = "${var.deployment_name}-lambda_iam_role"
+  name = "${var.project}-${var.venue}-lambda_iam_role"
 
   assume_role_policy = jsonencode({
     Version = "2012-10-17",
@@ -76,38 +70,16 @@ resource "aws_iam_role" "lambda_iam_role" {
 }
 
-resource "aws_iam_policy" "lambda_policy" {
-  name        = "${var.deployment_name}-lambda_policy"
-  description = "A policy for the Lambda function to access EFS"
-
-  policy = jsonencode({
-    Version = "2012-10-17",
-    Statement = [
-      {
-        Action = [
-          "elasticfilesystem:ClientMount",
-          "elasticfilesystem:ClientWrite",
-        ],
-        Effect = "Allow",
-        Resource = [
-          aws_efs_file_system.httpd_config_efs.arn
-        ],
-      },
-    ],
-  })
-
-}
-
 resource "aws_iam_policy" "lambda_ecs_stop_task_policy" {
-  name        = "${var.deployment_name}-lambda_ecs_stop_task_policy"
+  name        = "${var.project}-${var.venue}-lambda_ecs_stop_task_policy"
   description = "Allows Lambda functions to stop ECS tasks"
 
   policy = jsonencode({
     Version = "2012-10-17",
     Statement = [
       {
-      Effect = "Allow",
-      Action = ["ecs:ListTasks","ecs:StopTask"],
+        Effect = "Allow",
+        Action = ["ecs:ListTasks", "ecs:StopTask"],
         Resource = "*"
       }
     ]
@@ -116,7 +88,7 @@ resource "aws_iam_policy" "lambda_ecs_stop_task_policy" {
 
 
 resource "aws_iam_policy" "lambda_vpc_access_policy" {
-  name        = "${var.installprefix}-lambda_vpc_access_policy"
+  name        = "${var.project}-${var.venue}-lambda_vpc_access_policy"
"${var.project}-${var.venue}-lambda_vpc_access_policy" description = "Allows Lambda functions to manage ENIs for VPC access" policy = jsonencode({ @@ -142,10 +114,6 @@ resource "aws_iam_role_policy_attachment" "lambda_base_policy_attachment" { role = aws_iam_role.lambda_iam_role.name policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" } -resource "aws_iam_role_policy_attachment" "lambda_policy_attachment" { - role = aws_iam_role.lambda_iam_role.name - policy_arn = aws_iam_policy.lambda_policy.arn -} resource "aws_iam_role_policy_attachment" "lambda_stop_task_policy_attachment" { role = aws_iam_role.lambda_iam_role.name @@ -156,7 +124,6 @@ resource "aws_ssm_parameter" "lambda_function_name" { name = "/unity/${var.project}/${var.venue}/cs/management/proxy/lambda-name" type = "String" value = aws_lambda_function.httpdlambda.function_name - overwrite = true } diff --git a/terraform-unity/networking.tf b/terraform-unity/networking.tf index 1ac6d91..7780a7e 100644 --- a/terraform-unity/networking.tf +++ b/terraform-unity/networking.tf @@ -1,19 +1,10 @@ -data "local_file" "unity_yaml" { - filename = "/home/ubuntu/.unity/unity.yaml" -} -locals { - unity_config = yamldecode(data.local_file.unity_yaml.content) - project = local.unity_config.project - venue = local.unity_config.venue -} - # Create an Application Load Balancer (ALB) resource "aws_lb" "httpd_alb" { - name = "${var.deployment_name}-httpd-alb" - internal = false - load_balancer_type = "application" - security_groups = [aws_security_group.ecs_sg.id] - subnets = local.public_subnet_ids + name = "${var.project}-${var.venue}-httpd-alb" + internal = false + load_balancer_type = "application" + security_groups = [aws_security_group.ecs_sg.id] + subnets = local.public_subnet_ids enable_deletion_protection = false tags = { Service = "U-CS" @@ -22,10 +13,10 @@ resource "aws_lb" "httpd_alb" { # Create a Target Group for httpd resource "aws_lb_target_group" "httpd_tg" { - name = "${var.deployment_name}-httpd-tg" - port = 8080 - protocol = "HTTP" - vpc_id = data.aws_ssm_parameter.vpc_id.value + name = "${var.project}-${var.venue}-httpd-tg" + port = 8080 + protocol = "HTTP" + vpc_id = data.aws_ssm_parameter.vpc_id.value target_type = "ip" health_check { @@ -57,10 +48,8 @@ resource "aws_lb_listener" "httpd_listener" { } } - resource "aws_ssm_parameter" "mgmt_endpoint" { - name = "/unity/${local.project}/${local.venue}/management/httpd/loadbalancer-url" - type = "String" + name = "/unity/${var.project}/${var.venue}/management/httpd/loadbalancer-url" + type = "String" value = "${aws_lb_listener.httpd_listener.protocol}://${aws_lb.httpd_alb.dns_name}:${aws_lb_listener.httpd_listener.port}/management/ui" - overwrite = true } diff --git a/terraform-unity/ssm.tf b/terraform-unity/ssm.tf new file mode 100644 index 0000000..2dd8cbe --- /dev/null +++ b/terraform-unity/ssm.tf @@ -0,0 +1,36 @@ +resource "aws_ssm_parameter" "managementproxy_openvirtualhost" { + name = "/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/001-openvhost8080" + type = "String" + value = <<-EOT + +EOT +} + +resource "aws_ssm_parameter" "managementproxy_closevirtualhost" { + depends_on = [aws_ssm_parameter.managementproxy_openvirtualhost] + name = "/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/100-closevhost8080" + type = "String" + value = <<-EOT + +EOT +} + +resource "aws_ssm_parameter" "managementproxy_config" { + depends_on = [aws_ssm_parameter.managementproxy_closevirtualhost] + name = 
"/unity/${var.project}/${var.venue}/cs/management/proxy/configurations/010-management" + type = "String" + value = <<-EOT + + RewriteEngine on + RewriteCond %%{HTTP:Upgrade} websocket [NC] + RewriteCond %%{HTTP:Connection} upgrade [NC] + RewriteRule /management/(.*) ws://${var.mgmt_dns}/$1 [P,L] + + ProxyPass http://${var.mgmt_dns}/ + ProxyPassReverse http://${var.mgmt_dns}/ + ProxyPreserveHost On + FallbackResource /management/index.html + + +EOT +} diff --git a/terraform-unity/terraform.tf b/terraform-unity/terraform.tf index 7fa934d..ad75335 100644 --- a/terraform-unity/terraform.tf +++ b/terraform-unity/terraform.tf @@ -11,7 +11,7 @@ data "aws_ssm_parameter" "subnet_list" { #} locals { - subnet_map = jsondecode(data.aws_ssm_parameter.subnet_list.value) - subnet_ids = nonsensitive(local.subnet_map["private"]) + subnet_map = jsondecode(data.aws_ssm_parameter.subnet_list.value) + subnet_ids = nonsensitive(local.subnet_map["private"]) public_subnet_ids = nonsensitive(local.subnet_map["public"]) -} \ No newline at end of file +} diff --git a/terraform-unity/variables.tf b/terraform-unity/variables.tf index a63aa10..e26b7d8 100644 --- a/terraform-unity/variables.tf +++ b/terraform-unity/variables.tf @@ -1,8 +1,10 @@ +# tflint-ignore: terraform_unused_declarations variable "tags" { description = "AWS Tags" - type = map(string) + type = map(string) } +# tflint-ignore: terraform_unused_declarations variable "deployment_name" { description = "The deployment name" type = string @@ -10,23 +12,30 @@ variable "deployment_name" { variable "mgmt_dns" { description = "The DNS or IP of the ALB or EC2 instance" - type = string + type = string } -variable "project"{ +variable "project" { description = "The unity project its installed into" - type = string - default = "UnknownProject" + type = string + default = "UnknownProject" } variable "venue" { description = "The unity venue its installed into" - type = string - default = "UnknownVenue" + type = string + default = "UnknownVenue" } +# tflint-ignore: terraform_unused_declarations variable "installprefix" { description = "The management console install prefix" - type = string - default = "UnknownPrefix" + type = string + default = "UnknownPrefix" +} + +variable "httpd_proxy_version" { + description = "The version of the httpd proxy container" + type = string + default = "0.15.0" } diff --git a/write_site.py b/write_site.py new file mode 100644 index 0000000..019a28f --- /dev/null +++ b/write_site.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +import os +from pathlib import Path + +import boto3 + +config_path = Path("/etc/apache2/sites-enabled") + + +def fetch_config_ssm(project, venue): + client = boto3.client("ssm") + parameters = client.get_parameters_by_path( + Path=f"/unity/{project}/{venue}/cs/management/proxy/configurations", + Recursive=True, + ParameterFilters=[ + { + "Key": "Type", + "Values": [ + "String", + ], + }, + ], + WithDecryption=False, + ) + return parameters["Parameters"] + + +def template_file(parameters, debug): + # sort the parameters by the ssm param name, and then make a list of just + # their values for insertion + param_config = [ + parm["Value"] for parm in sorted(parameters, key=lambda x: x["Name"]) + ] + if debug: # so we can debug what SSM says it should/will be + for ln in param_config: + print(ln) + else: # otherwise, write them all to the config file + with open(config_path / "main.conf", "w") as file: + file.writelines(param_config) + + +if __name__ == "__main__": + if os.getenv("UNITY_PROJECT") and 
os.getenv("UNITY_VENUE"): + template_file( + fetch_config_ssm(os.getenv("UNITY_PROJECT"), os.getenv("UNITY_VENUE")), + os.getenv("DEBUG"), + ) + else: + print("Both UNITY_PROJECT and UNITY_VENUE must be set, quitting") + exit(1) diff --git a/write_site.sh b/write_site.sh deleted file mode 100755 index ea64e8e..0000000 --- a/write_site.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# Define the EFS mount point -efs_mount_point="/etc/apache2/sites-enabled" - -# Check if the EFS mount point exists -if [ ! -d "$efs_mount_point" ]; then - echo "EFS mount point not found: $efs_mount_point" - exit 1 -fi - -# File to be written -file_path="$efs_mount_point/mgmt.conf" -main_path="$efs_mount_point/main.conf" -# Ensure the ELB_DNS_NAME environment variable is set -if [ -z "$ELB_DNS_NAME" ]; then - echo "ELB_DNS_NAME environment variable is not set" - exit 1 -fi - -# VirtualHost template with placeholder for DNS_NAME -vhost_template=' -RewriteEngine on -ProxyPass /management/ http:/// -ProxyPassReverse /management/ http:/// -ProxyPreserveHost On -RewriteCond %{HTTP:Upgrade} websocket [NC] -RewriteCond %{HTTP:Connection} upgrade [NC] -RewriteRule /management/(.*) ws:///$1 [P,L] - -FallbackResource /management/index.html -' - -# Replace with actual DNS name -vhost_config="${vhost_template///$ELB_DNS_NAME}" - -# Write the configuration to the file -echo "$vhost_config" > "$file_path" - -echo "VirtualHost configuration written to: $file_path" - -main_template=' - - Include /etc/apache2/sites-enabled/mgmt.conf - ### ADD MORE HOSTS BELOW THIS LINE - - -' - -echo "$main_template" > "$main_path" -echo "Main configuration written to: $main_path" - -chown 1000:1000 /etc/apache2/sites-enabled/main.conf -chmod 755 /etc/apache2/sites-enabled/main.conf \ No newline at end of file